diff --git a/.cirrus.yml b/.cirrus.yml
index ed624d958c..406304f9a2 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -145,7 +145,7 @@ ostree-rs-ext_task:
dockerfile: contrib/cirrus/ostree_ext.dockerfile
docker_arguments: # required build-args
BASE_FQIN: quay.io/coreos-assembler/fcos-buildroot:testing-devel
- CIRRUS_IMAGE_VERSION: 1
+ CIRRUS_IMAGE_VERSION: 2
env:
EXT_REPO_NAME: ostree-rs-ext
EXT_REPO_HOME: $CIRRUS_WORKING_DIR/../$EXT_REPO_NAME
diff --git a/go.mod b/go.mod
index 16b9b57130..befc0291ef 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.18
require (
github.com/containers/common v0.53.0
- github.com/containers/image/v5 v5.25.0
+ github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e
github.com/containers/ocicrypt v1.1.7
github.com/containers/storage v1.46.1
github.com/docker/distribution v2.8.1+incompatible
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
+ github.com/opencontainers/image-spec v1.1.0-rc3
github.com/opencontainers/image-tools v1.0.0-rc3
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.7.0
@@ -35,31 +35,32 @@ require (
github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/docker v23.0.4+incompatible // indirect
+ github.com/docker/docker v23.0.5+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
+ github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
- github.com/go-openapi/runtime v0.25.0 // indirect
- github.com/go-openapi/spec v0.20.8 // indirect
+ github.com/go-openapi/runtime v0.26.0 // indirect
+ github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
- github.com/go-playground/validator/v10 v10.12.0 // indirect
+ github.com/go-playground/validator/v10 v10.13.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/go-containerregistry v0.13.0 // indirect
+ github.com/google/go-containerregistry v0.14.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/trillian v1.5.1 // indirect
github.com/google/uuid v1.3.0 // indirect
@@ -72,9 +73,9 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.16.4 // indirect
- github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
- github.com/leodido/go-urn v1.2.2 // indirect
+ github.com/klauspost/compress v1.16.5 // indirect
+ github.com/klauspost/pgzip v1.2.6 // indirect
+ github.com/leodido/go-urn v1.2.3 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -97,18 +98,19 @@ require (
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
- github.com/sigstore/fulcio v1.2.0 // indirect
- github.com/sigstore/rekor v1.1.0 // indirect
- github.com/sigstore/sigstore v1.6.0 // indirect
+ github.com/sigstore/fulcio v1.3.1 // indirect
+ github.com/sigstore/rekor v1.1.1 // indirect
+ github.com/sigstore/sigstore v1.6.4 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
- github.com/sylabs/sif/v2 v2.11.1 // indirect
+ github.com/sylabs/sif/v2 v2.11.3 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/transparency-dev/merkle v0.0.1 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
- github.com/vbauerster/mpb/v8 v8.3.0 // indirect
+ github.com/vbauerster/mpb/v8 v8.4.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -116,21 +118,25 @@ require (
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/otel v1.13.0 // indirect
- go.opentelemetry.io/otel/trace v1.13.0 // indirect
+ go.opentelemetry.io/otel v1.15.0 // indirect
+ go.opentelemetry.io/otel/trace v1.15.0 // indirect
+ go.uber.org/atomic v1.10.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.8.0 // indirect
- golang.org/x/mod v0.9.0 // indirect
+ golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.9.0 // indirect
- golang.org/x/oauth2 v0.6.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
+ golang.org/x/oauth2 v0.7.0 // indirect
+ golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
+ k8s.io/klog/v2 v2.90.1 // indirect
)
diff --git a/go.sum b/go.sum
index c7471b69a7..b6fc90f8b9 100644
--- a/go.sum
+++ b/go.sum
@@ -18,6 +18,7 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -32,8 +33,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSk
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
github.com/containers/common v0.53.0 h1:Ax814cLeX5VXSnkKUdxz762g+27fJj1st4UvKoXmkKs=
github.com/containers/common v0.53.0/go.mod h1:pABPxJwlTE8oYk9/2BW0e0mumkuhJHIPsABHTGRXN3w=
-github.com/containers/image/v5 v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk=
-github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls=
+github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e h1:9rH8hFLJjmwMkNAdFfXP3O6cAODKujsTn8JurumYz6I=
+github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e/go.mod h1:fGnQk2T+xmEk1/yL9Ky6djJ2F86vBIeo6X14zZQ33iM=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
@@ -55,8 +56,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek=
-github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k=
+github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -76,11 +77,14 @@ github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVB
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
@@ -100,12 +104,12 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
-github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc=
-github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs=
+github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc=
+github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ=
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
-github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
-github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8=
+github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
@@ -123,9 +127,10 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
-github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
-github.com/go-rod/rod v0.112.6 h1:zMirUmhsBeshMWyf285BD0UGtGq54HfThLDGSjcP3lU=
+github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ=
+github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4=
+github.com/go-rod/rod v0.112.9 h1:uA/yLbB+t0UlqJcLJtK2pZrCNPzd15dOKRUEOnmnt9k=
+github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -162,6 +167,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -186,10 +192,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
-github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
+github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
+github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -233,11 +240,11 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
-github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
-github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
+github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -247,10 +254,11 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leodido/go-urn v1.2.2 h1:7z68G0FCGvDk646jz1AelTYNYWrTNm0bEcFAo147wt4=
-github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
+github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -294,8 +302,8 @@ github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
-github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
@@ -319,7 +327,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
@@ -335,7 +343,6 @@ github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
@@ -343,12 +350,12 @@ github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI=
-github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c=
-github.com/sigstore/rekor v1.1.0 h1:9fjPvW0WERE7VPtSSVSTbDLLOsrNx3RtiIeZ4/1tmDI=
-github.com/sigstore/rekor v1.1.0/go.mod h1:jEOGDGPMURBt9WR50N0rO7X8GZzLE3UQT+ln6BKJ/m0=
-github.com/sigstore/sigstore v1.6.0 h1:0fYHVoUlPU3WM8o3U1jT9SI2lqQE68XbG+qWncXaZC8=
-github.com/sigstore/sigstore v1.6.0/go.mod h1:+55pf6HZ15kf60c08W+GH95JQbAcnVyUBquQGSVdsto=
+github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
+github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
+github.com/sigstore/rekor v1.1.1 h1:JCeSss+qUHnCATmwAZh4zT9k0Frdyq0BjmRwewSfEy4=
+github.com/sigstore/rekor v1.1.1/go.mod h1:x/xK+HK08MiuJv+v4OxY/Oo3bhuz1DtJXNJrV7hrzvs=
+github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE=
+github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -379,8 +386,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY=
-github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ=
+github.com/sylabs/sif/v2 v2.11.3 h1:EQxi5zl6i5DsbVal9HHpk/zuSx7aNLeZBy8vmvFz838=
+github.com/sylabs/sif/v2 v2.11.3/go.mod h1:0ryivqvvsncJOJjU5QQIEc77a5zKK46F+urBXMdA07w=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -391,6 +398,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E=
+github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -398,8 +407,8 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc=
-github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4=
+github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo=
+github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
@@ -417,6 +426,7 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/ysmood/fetchup v0.2.2 h1:Qn8/q5uDW7szclt4sVXCFJ1TXup3hogz94OaLf6kloo=
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
@@ -435,12 +445,18 @@ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdH
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y=
-go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg=
-go.opentelemetry.io/otel/sdk v1.13.0 h1:BHib5g8MvdqS65yo2vV1s6Le42Hm6rrw08qU6yz5JaM=
-go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY=
-go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
+go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk=
+go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
+go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk=
+go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
+go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -462,8 +478,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -485,8 +501,8 @@ golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
-golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
-golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -496,8 +512,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -539,7 +555,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -566,8 +582,8 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
-google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
@@ -616,4 +632,5 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
+k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
+k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go
index 96674ddbb8..f45b97f56c 100644
--- a/vendor/github.com/containers/image/v5/copy/blob.go
+++ b/vendor/github.com/containers/image/v5/copy/blob.go
@@ -43,7 +43,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
stream.reader = bar.ProxyReader(stream.reader)
// === Decrypt the stream, if required.
- decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
+ decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo)
if err != nil {
return types.BlobInfo{}, err
}
@@ -78,7 +78,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
// Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
}
- encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
+ encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
if err != nil {
return types.BlobInfo{}, err
}
diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go
index 54aca9e572..86fadff66e 100644
--- a/vendor/github.com/containers/image/v5/copy/encryption.go
+++ b/vendor/github.com/containers/image/v5/copy/encryption.go
@@ -33,28 +33,33 @@ type bpDecryptionStepData struct {
// blobPipelineDecryptionStep updates *stream to decrypt if it is necessary.
// srcInfo is only used for error messages.
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
-func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
- if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
- desc := imgspecv1.Descriptor{
- Annotations: stream.info.Annotations,
- }
- reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
- if err != nil {
- return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
- }
-
- stream.reader = reader
- stream.info.Digest = decryptedDigest
- stream.info.Size = -1
- maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
- return strings.HasPrefix(k, "org.opencontainers.image.enc")
- })
+func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
+ if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil {
return &bpDecryptionStepData{
- decrypting: true,
+ decrypting: false,
}, nil
}
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ desc := imgspecv1.Descriptor{
+ Annotations: stream.info.Annotations,
+ }
+ reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false)
+ if err != nil {
+ return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = decryptedDigest
+ stream.info.Size = -1
+ maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
+ return strings.HasPrefix(k, "org.opencontainers.image.enc")
+ })
return &bpDecryptionStepData{
- decrypting: false,
+ decrypting: true,
}, nil
}
@@ -74,34 +79,39 @@ type bpEncryptionStepData struct {
// blobPipelineEncryptionStep updates *stream to encrypt if it is required by toEncrypt.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
-func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
+func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
- if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
- var annotations map[string]string
- if !decryptionStep.decrypting {
- annotations = srcInfo.Annotations
- }
- desc := imgspecv1.Descriptor{
- MediaType: srcInfo.MediaType,
- Digest: srcInfo.Digest,
- Size: srcInfo.Size,
- Annotations: annotations,
- }
- reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
- if err != nil {
- return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
- }
-
- stream.reader = reader
- stream.info.Digest = ""
- stream.info.Size = -1
+ if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil {
return &bpEncryptionStepData{
- encrypting: true,
- finalizer: finalizer,
+ encrypting: false,
}, nil
}
+
+ if ic.cannotModifyManifestReason != "" {
+ return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason)
+ }
+
+ var annotations map[string]string
+ if !decryptionStep.decrypting {
+ annotations = srcInfo.Annotations
+ }
+ desc := imgspecv1.Descriptor{
+ MediaType: srcInfo.MediaType,
+ Digest: srcInfo.Digest,
+ Size: srcInfo.Size,
+ Annotations: annotations,
+ }
+ reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc)
+ if err != nil {
+ return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
+ }
+
+ stream.reader = reader
+ stream.info.Digest = ""
+ stream.info.Size = -1
return &bpEncryptionStepData{
- encrypting: false,
+ encrypting: true,
+ finalizer: finalizer,
}, nil
}
diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go
index a35ea42205..6f01cf5cc3 100644
--- a/vendor/github.com/containers/image/v5/copy/manifest.go
+++ b/vendor/github.com/containers/image/v5/copy/manifest.go
@@ -9,6 +9,7 @@ import (
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
@@ -18,6 +19,9 @@ import (
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
+var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
+
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
list []string
@@ -76,11 +80,14 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
}
- if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
- return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
- preferredMIMEType: srcType,
- otherMIMETypeCandidates: []string{},
- }, nil
+ if len(destSupportedManifestMIMETypes) == 0 {
+ if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
+ return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
+ preferredMIMEType: srcType,
+ otherMIMETypeCandidates: []string{},
+ }, nil
+ }
+ destSupportedManifestMIMETypes = ociEncryptionMIMETypes
}
supportedByDest := set.New[string]()
for _, t := range destSupportedManifestMIMETypes {
@@ -88,6 +95,27 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
supportedByDest.Add(t)
}
}
+ if supportedByDest.Empty() {
+ if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
+ return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
+ }
+ // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
+ if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
+ return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
+ }
+ // destSupportedManifestMIMETypes has three possible origins:
+ if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
+ return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
+ in.forceManifestMIMEType)
+ }
+ if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
+ // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
+ return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
+ }
+ // 3. destination does not support encryption.
+ return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
+ strings.Join(destSupportedManifestMIMETypes, ", "))
+ }
// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
// So, build a list of types to try in order of decreasing preference.
@@ -122,11 +150,13 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
// Finally, try anything else the destination supports.
for _, t := range destSupportedManifestMIMETypes {
- prioritizedTypes.append(t)
+ if supportedByDest.Contains(t) {
+ prioritizedTypes.append(t)
+ }
}
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
- if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
+ if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
}
res := manifestConversionPlan{
diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go
index 7d2a98d684..2c245f54f9 100644
--- a/vendor/github.com/containers/image/v5/docker/daemon/client.go
+++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go
@@ -21,33 +21,49 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
host = sys.DockerDaemonHost
}
- // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
- // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
- // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
- // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
+ opts := []dockerclient.Opt{
+ dockerclient.WithHost(host),
+ dockerclient.WithVersion(defaultAPIVersion),
+ }
+
+ // We conditionalize building the TLS configuration only to TLS sockets:
+ //
+ // The dockerclient.Client implementation differentiates between
+ // - Client.proto, which is ~how the connection is established (IP / AF_UNIX/Windows)
+ // - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
+ //
+ // Only Client.proto is set from the URL in dockerclient.WithHost(),
+ // Client.scheme is detected based on a http.Client.TLSClientConfig presence;
+ // dockerclient.WithHTTPClient with a client that has TLSClientConfig set
+ // will, by default, trigger an attempt to use TLS.
+ //
+ // So, don’t use WithHTTPClient for unix:// sockets at all.
//
- // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
+ // Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
+ // we also should not set TLSClientConfig. We continue to use WithHTTPClient
+ // with our slightly non-default settings to avoid a behavior change on updates of c/image.
//
- // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
- // TLSClientConfig to nil. This can be achieved by using the form `http://`
+ // Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic
+ // explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ;
+ // so that would not be any simpler.
serverURL, err := dockerclient.ParseHostURL(host)
if err != nil {
return nil, err
}
- var httpClient *http.Client
- if serverURL.Scheme != "unix" {
- if serverURL.Scheme == "http" {
- httpClient = httpConfig()
- } else {
- hc, err := tlsConfig(sys)
- if err != nil {
- return nil, err
- }
- httpClient = hc
+ switch serverURL.Scheme {
+ case "unix": // Nothing
+ case "http":
+ hc := httpConfig()
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
+ default:
+ hc, err := tlsConfig(sys)
+ if err != nil {
+ return nil, err
}
+ opts = append(opts, dockerclient.WithHTTPClient(hc))
}
- return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil)
+ return dockerclient.NewClientWithOpts(opts...)
}
func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
index 15c9c22793..c3234c377b 100644
--- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go
@@ -226,9 +226,9 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
switch m.m.LayersDescriptors[idx].MediaType {
case manifest.DockerV2Schema2ForeignLayerMediaType:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
- layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema2LayerMediaType:
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go
index 4b74de3e58..166daa0e87 100644
--- a/vendor/github.com/containers/image/v5/internal/image/oci.go
+++ b/vendor/github.com/containers/image/v5/internal/image/oci.go
@@ -215,11 +215,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
for idx := range layers {
layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
switch layers[idx].MediaType {
- case imgspecv1.MediaTypeImageLayerNonDistributable:
+ case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
- case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
+ case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
- case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
+ case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
case imgspecv1.MediaTypeImageLayer:
layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index eb23547680..a70470d99a 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -42,7 +42,12 @@ type OCI1 struct {
// useful for validation anyway.
func SupportedOCI1MediaType(m string) error {
switch m {
- case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
+ case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig,
+ imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd,
+ imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ imgspecv1.MediaTypeImageManifest,
+ imgspecv1.MediaTypeLayoutHeader,
+ ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
return nil
default:
return fmt.Errorf("unsupported OCIv1 media type: %q", m)
@@ -102,9 +107,9 @@ func (m *OCI1) LayerInfos() []LayerInfo {
var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
{
- mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
- compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
- compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
},
{
mtsUncompressed: imgspecv1.MediaTypeImageLayer,
@@ -166,7 +171,8 @@ func getEncryptedMediaType(mediatype string) (string, error) {
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
- case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
+ imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
return mediatype + "+encrypted", nil
}
diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
index 4a4ab9b2c6..6586b84402 100644
--- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
+++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
@@ -188,14 +188,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
return index.Manifests[0], nil
} else {
// if image specified, look through all manifests for a match
+ var unsupportedMIMETypes []string
for _, md := range index.Manifests {
- if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex {
- continue
- }
if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
- return md, nil
+ if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
+ return md, nil
+ }
+ unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
}
}
+ if len(unsupportedMIMETypes) != 0 {
+ return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
+ }
}
return imgspecv1.Descriptor{}, ImageNotFoundError{ref}
}
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
index b645f80dd0..1f6ab7f3b9 100644
--- a/vendor/github.com/containers/image/v5/sif/src.go
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -96,9 +96,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere
created := sifImg.ModifiedAt()
config := imgspecv1.Image{
- Created: &created,
- Architecture: sifImg.PrimaryArch(),
- OS: "linux",
+ Created: &created,
+ Platform: imgspecv1.Platform{
+ Architecture: sifImg.PrimaryArch(),
+ OS: "linux",
+ },
Config: imgspecv1.ImageConfig{
Cmd: commandLine,
},
@@ -180,7 +182,7 @@ func (s *sifImageSource) Close() error {
func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
switch info.Digest {
case s.configDigest:
- return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+ return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil
case s.layerDigest:
reader, err := os.Open(s.layerFile)
if err != nil {
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
index 0fb3a83934..6f9bfaf756 100644
--- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go
@@ -19,7 +19,6 @@ import (
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
)
type tarballImageSource struct {
@@ -29,42 +28,46 @@ type tarballImageSource struct {
impl.DoesNotAffectLayerInfosForCopy
stubs.NoGetBlobAtInitialize
- reference tarballReference
- filenames []string
- diffIDs []digest.Digest
- diffSizes []int64
- blobIDs []digest.Digest
- blobSizes []int64
- blobTypes []string
- config []byte
- configID digest.Digest
- configSize int64
- manifest []byte
+ reference tarballReference
+ blobs map[digest.Digest]tarballBlob
+ manifest []byte
+}
+
+// tarballBlob is a blob that tarballImageSource can return by GetBlob.
+type tarballBlob struct {
+ contents []byte // or nil to read from filename below
+ filename string // valid if contents == nil
+ size int64
}
func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- // Gather up the digests, sizes, and date information for all of the files.
- filenames := []string{}
+ // Pick up the layer comment from the configuration's history list, if one is set.
+ comment := "imported from tarball"
+ if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
+ comment = r.config.History[0].Comment
+ }
+
+ // Gather up the digests, sizes, and history information for all of the files.
+ blobs := map[digest.Digest]tarballBlob{}
diffIDs := []digest.Digest{}
- diffSizes := []int64{}
- blobIDs := []digest.Digest{}
- blobSizes := []int64{}
- blobTimes := []time.Time{}
- blobTypes := []string{}
+ created := time.Time{}
+ history := []imgspecv1.History{}
+ layerDescriptors := []imgspecv1.Descriptor{}
for _, filename := range r.filenames {
- var file *os.File
- var err error
- var blobSize int64
- var blobTime time.Time
var reader io.Reader
+ var blobTime time.Time
+ var blob tarballBlob
if filename == "-" {
- blobSize = int64(len(r.stdin))
- blobTime = time.Now()
reader = bytes.NewReader(r.stdin)
+ blobTime = time.Now()
+ blob = tarballBlob{
+ contents: r.stdin,
+ size: int64(len(r.stdin)),
+ }
} else {
- file, err = os.Open(filename)
+ file, err := os.Open(filename)
if err != nil {
- return nil, fmt.Errorf("error opening %q for reading: %w", filename, err)
+ return nil, err
}
defer file.Close()
reader = file
@@ -72,8 +75,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
if err != nil {
return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
}
- blobSize = fileinfo.Size()
blobTime = fileinfo.ModTime()
+ blob = tarballBlob{
+ filename: filename,
+ size: fileinfo.Size(),
+ }
}
// Default to assuming the layer is compressed.
@@ -96,8 +102,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
uncompressed = nil
}
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
- n, err := io.Copy(io.Discard, reader)
- if err != nil {
+ if _, err := io.Copy(io.Discard, reader); err != nil {
return nil, fmt.Errorf("error reading %q: %v", filename, err)
}
if uncompressed != nil {
@@ -105,38 +110,26 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
}
// Grab our uncompressed and possibly-compressed digests and sizes.
- filenames = append(filenames, filename)
- diffIDs = append(diffIDs, diffIDdigester.Digest())
- diffSizes = append(diffSizes, n)
- blobIDs = append(blobIDs, blobIDdigester.Digest())
- blobSizes = append(blobSizes, blobSize)
- blobTimes = append(blobTimes, blobTime)
- blobTypes = append(blobTypes, layerType)
- }
+ diffID := diffIDdigester.Digest()
+ blobID := blobIDdigester.Digest()
+ diffIDs = append(diffIDs, diffID)
+ blobs[blobID] = blob
- // Build the rootfs and history for the configuration blob.
- rootfs := imgspecv1.RootFS{
- Type: "layers",
- DiffIDs: diffIDs,
- }
- created := time.Time{}
- history := []imgspecv1.History{}
- // Pick up the layer comment from the configuration's history list, if one is set.
- comment := "imported from tarball"
- if len(r.config.History) > 0 && r.config.History[0].Comment != "" {
- comment = r.config.History[0].Comment
- }
- for i := range diffIDs {
- createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator)
history = append(history, imgspecv1.History{
- Created: &blobTimes[i],
- CreatedBy: createdBy,
+ Created: &blobTime,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator),
Comment: comment,
})
// Use the mtime of the most recently modified file as the image's creation time.
- if created.Before(blobTimes[i]) {
- created = blobTimes[i]
+ if created.Before(blobTime) {
+ created = blobTime
}
+
+ layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
+ Digest: blobID,
+ Size: blob.size,
+ MediaType: layerType,
+ })
}
// Pick up other defaults from the config in the reference.
@@ -150,7 +143,10 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
if config.OS == "" {
config.OS = runtime.GOOS
}
- config.RootFS = rootfs
+ config.RootFS = imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
config.History = history
// Encode and digest the image configuration blob.
@@ -159,24 +155,19 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
}
configID := digest.Canonical.FromBytes(configBytes)
- configSize := int64(len(configBytes))
-
- // Populate a manifest with the configuration blob and the file as the single layer.
- layerDescriptors := []imgspecv1.Descriptor{}
- for i := range blobIDs {
- layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{
- Digest: blobIDs[i],
- Size: blobSizes[i],
- MediaType: blobTypes[i],
- })
+ blobs[configID] = tarballBlob{
+ contents: configBytes,
+ size: int64(len(configBytes)),
}
+
+ // Populate a manifest with the configuration blob and the layers.
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
},
Config: imgspecv1.Descriptor{
Digest: configID,
- Size: configSize,
+ Size: int64(len(configBytes)),
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
@@ -196,17 +187,9 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(r),
- reference: *r,
- filenames: filenames,
- diffIDs: diffIDs,
- diffSizes: diffSizes,
- blobIDs: blobIDs,
- blobSizes: blobSizes,
- blobTypes: blobTypes,
- config: configBytes,
- configID: configID,
- configSize: configSize,
- manifest: manifestBytes,
+ reference: *r,
+ blobs: blobs,
+ manifest: manifestBytes,
}
src.Compat = impl.AddCompat(src)
@@ -221,24 +204,18 @@ func (is *tarballImageSource) Close() error {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
- // We should only be asked about things in the manifest. Maybe the configuration blob.
- if blobinfo.Digest == is.configID {
- return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
- }
- // Maybe one of the layer blobs.
- i := slices.Index(is.blobIDs, blobinfo.Digest)
- if i == -1 {
+ blob, ok := is.blobs[blobinfo.Digest]
+ if !ok {
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
- // We want to read that layer: open the file or memory block and hand it back.
- if is.filenames[i] == "-" {
- return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
+ if blob.contents != nil {
+ return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil
}
- reader, err := os.Open(is.filenames[i])
+ reader, err := os.Open(blob.filename)
if err != nil {
- return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
+ return nil, -1, err
}
- return reader, is.blobSizes[i], nil
+ return reader, blob.size, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
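
The tarball source refactor above collapses several parallel slices into a single digest-keyed map of blobs, each either held in memory or backed by a file. A rough, self-contained sketch of that lookup pattern (the type and function names are illustrative, not the vendored API):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// blob mirrors the shape used in the patch: in-memory contents or a backing file.
type blob struct {
	contents []byte // nil means read from filename below
	filename string // used only when contents == nil
	size     int64
}

// openBlob returns a reader for the blob keyed by digest, or an error if unknown.
func openBlob(blobs map[string]blob, digest string) (io.ReadCloser, int64, error) {
	b, ok := blobs[digest]
	if !ok {
		return nil, -1, fmt.Errorf("no blob with digest %q found", digest)
	}
	if b.contents != nil {
		return io.NopCloser(bytes.NewReader(b.contents)), int64(len(b.contents)), nil
	}
	f, err := os.Open(b.filename)
	if err != nil {
		return nil, -1, err
	}
	return f, b.size, nil
}

func main() {
	blobs := map[string]blob{
		"sha256:config": {contents: []byte(`{"config":{}}`), size: 13},
	}
	r, size, err := openBlob(blobs, "sha256:config")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	fmt.Println("blob size:", size)
}
```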
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index 16a6d58169..38df865b43 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 25
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
+ VersionDev = "-dev"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
index 26a0fa2756..09ea4851ff 100644
--- a/vendor/github.com/docker/docker/client/client.go
+++ b/vendor/github.com/docker/docker/client/client.go
@@ -6,9 +6,10 @@ https://docs.docker.com/engine/api/
# Usage
-You use the library by creating a client object and calling methods on it. The
-client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
-or configured manually with NewClient().
+You use the library by constructing a client object using [NewClientWithOpts]
+and calling methods on it. The client can be configured from environment
+variables by passing the [FromEnv] option, or configured manually by passing any
+of the other available [Opts].
For example, to list running containers (the equivalent of "docker ps"):
diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go
index 54cdfc29a8..9e366ce20d 100644
--- a/vendor/github.com/docker/docker/client/client_deprecated.go
+++ b/vendor/github.com/docker/docker/client/client_deprecated.go
@@ -9,7 +9,11 @@ import "net/http"
// It won't send any version information if the version number is empty. It is
// highly recommended that you set a version or your client may break if the
// server is upgraded.
-// Deprecated: use NewClientWithOpts
+//
+// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion],
+// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API
+// version negotiation by passing the [WithAPIVersionNegotiation] option instead
+// of WithVersion.
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
}
@@ -17,7 +21,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
// NewEnvClient initializes a new API client based on environment variables.
// See FromEnv for a list of support environment variables.
//
-// Deprecated: use NewClientWithOpts(FromEnv)
+// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option.
func NewEnvClient() (*Client, error) {
return NewClientWithOpts(FromEnv)
}
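
A minimal sketch of the construction style these updated docs point to, using options that exist in this client package (the listing call and error handling are only illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv reads DOCKER_HOST and related variables; WithAPIVersionNegotiation
	// avoids pinning an API version, as the deprecation notes above recommend.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("running containers:", len(containers))
}
```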
diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore
new file mode 100644
index 0000000000..ba22c99a99
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.gitignore
@@ -0,0 +1,3 @@
+.idea
+*.sw?
+.vscode
diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml
new file mode 100644
index 0000000000..7b8e26bcee
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+go:
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - 1.14.x
+
+script:
+ - go get -d -t ./...
+ - go vet ./...
+ - go test ./...
+ - >
+ go_version=$(go version);
+ if [ ${go_version:13:4} = "1.12" ]; then
+ go get -u golang.org/x/tools/cmd/goimports;
+ goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :;
+ fi
+
diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md
new file mode 100644
index 0000000000..9a64a72eec
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CHANGELOG.md
@@ -0,0 +1,190 @@
+# Changelog
+
+## v4.1.2 (2020-06-02)
+
+- fix handling of MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
+- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
+
+
+## v4.1.1 (2020-04-16)
+
+- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
+ route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
+- new middleware.RouteHeaders as a simple router for request headers with wildcard support
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
+
+
+## v4.1.0 (2020-04-01)
+
+- middleware.LogEntry: Write method on interface now passes the response header
+ and an extra interface type useful for custom logger implementations.
+- middleware.WrapResponseWriter: minor fix
+- middleware.Recoverer: a bit prettier
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
+
+
+## v4.0.4 (2020-03-24)
+
+- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
+- a few minor improvements and fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
+
+
+## v4.0.3 (2020-01-09)
+
+- core: fix regexp routing to include default value when param is not matched
+- middleware: rewrite of middleware.Compress
+- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
+
+
+## v4.0.2 (2019-02-26)
+
+- Minor fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
+
+
+## v4.0.1 (2019-01-21)
+
+- Fixes issue with compress middleware: #382 #385
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
+
+
+## v4.0.0 (2019-01-10)
+
+- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
+- router: respond with 404 on router with no routes (#362)
+- router: additional check to ensure wildcard is at the end of a url pattern (#333)
+- middleware: deprecate use of http.CloseNotifier (#347)
+- middleware: fix RedirectSlashes to include query params on redirect (#334)
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
+
+
+## v3.3.4 (2019-01-07)
+
+- Minor middleware improvements. No changes to core library/router. Moving v3 into its
+  own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
+
+
+## v3.3.3 (2018-08-27)
+
+- Minor release
+- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
+
+
+## v3.3.2 (2017-12-22)
+
+- Support to route trailing slashes on mounted sub-routers (#281)
+- middleware: new `ContentCharset` to check matching charsets. Thank you
+ @csucu for your community contribution!
+
+
+## v3.3.1 (2017-11-20)
+
+- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
+- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
+- Minor bug fixes
+
+
+## v3.3.0 (2017-10-10)
+
+- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
+- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
+
+
+## v3.2.1 (2017-08-31)
+
+- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
+ and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
+- Add new `RouteMethod` to `*Context`
+- Add new `Routes` pointer to `*Context`
+- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
+- Updated benchmarks (see README)
+
+
+## v3.1.5 (2017-08-02)
+
+- Setup golint and go vet for the project
+- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
+ to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
+
+
+## v3.1.0 (2017-07-10)
+
+- Fix a few minor issues after v3 release
+- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
+- Move `render` sub-pkg to https://github.com/go-chi/render
+- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
+ suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
+ https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
+
+
+## v3.0.0 (2017-06-21)
+
+- Major update to chi library with many exciting updates, but also some *breaking changes*
+- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
+ `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
+ same router
+- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
+ `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
+- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
+ `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
+ in `_examples/custom-handler`
+- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
+  own using a file handler with the stdlib, see `_examples/fileserver` for an example
+- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
+- Moved the chi project to its own organization, to allow chi-related community packages to
+ be easily discovered and supported, at: https://github.com/go-chi
+- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
+- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
+
+
+## v2.1.0 (2017-03-30)
+
+- Minor improvements and update to the chi core library
+- Introduced a brand new `chi/render` sub-package to complete the story of building
+ APIs to offer a pattern for managing well-defined request / response payloads. Please
+ check out the updated `_examples/rest` example for how it works.
+- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state with many companies and users running it in
+ production, the inclusion of some improvements to the middlewares, we are very pleased to
+ announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+ community `"net/context"` package has been included in the standard library as `"context"` and
+  utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+ request-scoped values. We're very excited about the new context addition and are proud to
+ introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+ with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+ which provides direct access to URL routing parameters, the routing path and the matching
+ routing patterns.
+- Users upgrading from chi v1 to v2, need to:
+ 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+ the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+ 2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+ or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
+ has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
new file mode 100644
index 0000000000..c0ac2dfe85
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2. Download the sources and switch the working directory:
+
+ ```bash
+ go get -u -d github.com/go-chi/chi
+ cd $GOPATH/src/github.com/go-chi/chi
+ ```
+
+## Submitting a Pull Request
+
+A typical workflow is:
+
+1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip]
+2. [Create a topic branch.][branch]
+3. Add tests for your change.
+4. Run `go test`. If the new tests pass, return to step 3.
+5. Implement the change and ensure the tests from the previous steps pass.
+6. Run `goimports -w .` to ensure the new code conforms to the Go formatting guidelines.
+7. [Add, commit and push your changes.][git-help]
+8. [Submit a pull request.][pull-req]
+
+[go-install]: https://golang.org/doc/install
+[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
+[fork]: https://help.github.com/articles/fork-a-repo
+[branch]: http://learn.github.com/p/branching.html
+[git-help]: https://guides.github.com
+[pull-req]: https://help.github.com/articles/using-pull-requests
diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE
new file mode 100644
index 0000000000..d99f02ffac
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md
new file mode 100644
index 0000000000..5a8fc9d096
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/README.md
@@ -0,0 +1,441 @@
+#
+
+
+[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
+
+`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
+especially good at helping you write large REST API services that are kept maintainable as your
+project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
+handle signaling, cancelation and request-scoped values across a handler chain.
+
+The focus of the project has been to seek out an elegant and comfortable design for writing
+REST API servers, written during the development of the Pressly API service that powers our
+public API service, which in turn powers all of our client-side applications.
+
+The key considerations of chi's design are: project structure, maintainability, standard http
+handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
+parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
+included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
+
+## Install
+
+`go get -u github.com/go-chi/chi`
+
+
+## Features
+
+* **Lightweight** - cloc'd in ~1000 LOC for the chi router
+* **Fast** - yes, see [benchmarks](#benchmarks)
+* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
+* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting
+* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
+* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
+* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
+* **No external dependencies** - plain ol' Go stdlib + net/http
+
+
+## Examples
+
+See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
+
+
+**As easy as:**
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+ r.Use(middleware.Logger)
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("welcome"))
+ })
+ http.ListenAndServe(":3000", r)
+}
+```
+
+**REST Preview:**
+
+Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
+in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
+Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
+
+I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
+above; they will show you all the features of chi and serve as a good form of documentation.
+
+```go
+import (
+ //...
+ "context"
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+
+ // A good base middleware stack
+ r.Use(middleware.RequestID)
+ r.Use(middleware.RealIP)
+ r.Use(middleware.Logger)
+ r.Use(middleware.Recoverer)
+
+ // Set a timeout value on the request context (ctx), that will signal
+ // through ctx.Done() that the request has timed out and further
+ // processing should be stopped.
+ r.Use(middleware.Timeout(60 * time.Second))
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hi"))
+ })
+
+ // RESTy routes for "articles" resource
+ r.Route("/articles", func(r chi.Router) {
+ r.With(paginate).Get("/", listArticles) // GET /articles
+ r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
+
+ r.Post("/", createArticle) // POST /articles
+ r.Get("/search", searchArticles) // GET /articles/search
+
+ // Regexp url parameters:
+ r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
+
+ // Subrouters:
+ r.Route("/{articleID}", func(r chi.Router) {
+ r.Use(ArticleCtx)
+ r.Get("/", getArticle) // GET /articles/123
+ r.Put("/", updateArticle) // PUT /articles/123
+ r.Delete("/", deleteArticle) // DELETE /articles/123
+ })
+ })
+
+ // Mount the admin sub-router
+ r.Mount("/admin", adminRouter())
+
+ http.ListenAndServe(":3333", r)
+}
+
+func ArticleCtx(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ articleID := chi.URLParam(r, "articleID")
+ article, err := dbGetArticle(articleID)
+ if err != nil {
+ http.Error(w, http.StatusText(404), 404)
+ return
+ }
+ ctx := context.WithValue(r.Context(), "article", article)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func getArticle(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ article, ok := ctx.Value("article").(*Article)
+ if !ok {
+ http.Error(w, http.StatusText(422), 422)
+ return
+ }
+ w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
+}
+
+// A completely separate router for administrator routes
+func adminRouter() http.Handler {
+ r := chi.NewRouter()
+ r.Use(AdminOnly)
+ r.Get("/", adminIndex)
+ r.Get("/accounts", adminListAccounts)
+ return r
+}
+
+func AdminOnly(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ perm, ok := ctx.Value("acl.permission").(YourPermissionType)
+ if !ok || !perm.IsAdmin() {
+ http.Error(w, http.StatusText(403), 403)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+
+## Router design
+
+chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+ // Handle and HandleFunc adds routes for `pattern` that matches
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+ // Method and MethodFunc adds routes for `pattern` that matches
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds two methods for router traversal, which is also
+// used by the github.com/go-chi/docgen package to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+```
+
+Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern
+supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
+can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
+and `chi.URLParam(r, "*")` for a wildcard parameter.
+
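+A short, self-contained sketch of both lookups (the routes and responses are just placeholders):
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/go-chi/chi"
+)
+
+func main() {
+	r := chi.NewRouter()
+
+	// Named parameter: GET /users/123 responds with "user 123".
+	r.Get("/users/{userID}", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("user " + chi.URLParam(r, "userID")))
+	})
+
+	// Wildcard: GET /files/docs/readme.md responds with "path docs/readme.md".
+	r.Get("/files/*", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("path " + chi.URLParam(r, "*")))
+	})
+
+	http.ListenAndServe(":3000", r)
+}
+```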
+
+### Middleware handlers
+
+chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
+about them, which means the router and all the tooling is designed to be compatible and
+friendly with any middleware in the community. This offers much better extensibility and reuse
+of packages and is at the heart of chi's purpose.
+
+Here is an example of a standard net/http middleware handler using the new request context
+available in Go. This middleware sets a hypothetical user identifier on the request
+context and calls the next handler in the chain.
+
+```go
+// HTTP middleware setting a value on the request context
+func MyMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := context.WithValue(r.Context(), "user", "123")
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+```
+
+
+### Request handlers
+
+chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
+func that reads a user identifier from the request context - hypothetically, identifying
+the user sending an authenticated request, validated+set by a previous middleware handler.
+
+```go
+// HTTP handler accessing data from the request context.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ user := r.Context().Value("user").(string)
+ w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters right onto the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
+
+ ctx := r.Context()
+ key := ctx.Value("key").(string)
+
+ w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+-----------------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description |
+|:----------------------|:---------------------------------------------------------------------------------
+| AllowContentType | Explicit whitelist of accepted request Content-Types |
+| BasicAuth | Basic HTTP authentication |
+| Compress | Gzip compression for clients that accept compressed responses |
+| GetHead | Automatically route undefined HEAD requests to GET handlers |
+| Heartbeat | Monitoring endpoint to check the servers pulse |
+| Logger | Logs the start and end of each request with the elapsed processing time |
+| NoCache | Sets response headers to prevent clients from caching |
+| Profiler | Easily attach net/http/pprof to your routers |
+| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP |
+| Recoverer | Gracefully absorb panics and prints the stack trace |
+| RequestID | Injects a request ID into the context of each request |
+| RedirectSlashes | Redirect slashes on routing paths |
+| SetHeader | Short-hand middleware to set a response header key/value |
+| StripSlashes | Strip slashes on routing paths |
+| Throttle | Puts a ceiling on the number of concurrent requests |
+| Timeout | Signals to the request context when the timeout deadline is reached |
+| URLFormat | Parse extension from url and put it on request context |
+| WithValue | Short-hand middleware to set a key/value on the request context |
+-----------------------------------------------------------------------------------------------------------
+
+### Extra middlewares & packages
+
+Please see https://github.com/go-chi for additional packages.
+
+--------------------------------------------------------------------------------------------------------------------
+| package | description |
+|:---------------------------------------------------|:-------------------------------------------------------------
+| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
+| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
+| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
+| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
+| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
+| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
+| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
+| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
+| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
+--------------------------------------------------------------------------------------------------------------------
+
+Please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware.
+
+
+## context?
+
+`context` is a tiny pkg that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and is available in stdlib since go1.7.
+
+Learn more at https://blog.golang.org/context
+
+and..
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop
+
+```shell
+BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op
+BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op
+BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op
+BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+ * Parts of chi's thinking comes from goji, and chi's middleware package
+ sources from goji.
+* Armon Dadgar for https://github.com/armon/go-radix
+* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
+
+We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
+
+
+## Beyond REST
+
+chi is just a http router that lets you decompose request handling into many smaller layers.
+Many companies use chi to write REST services for their public APIs. But, REST is just a convention
+for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server
+system or network of microservices.
+
+Looking beyond REST, I also recommend some newer works in the field:
+* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
+* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
+* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
+* [NATS](https://nats.io) - lightweight pub-sub
+
+
+## License
+
+Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
+
+Licensed under [MIT License](./LICENSE)
+
+[GoDoc]: https://godoc.org/github.com/go-chi/chi
+[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
+[Travis]: https://travis-ci.org/go-chi/chi
+[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go
new file mode 100644
index 0000000000..88e6846138
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chain.go
@@ -0,0 +1,49 @@
+package chi
+
+import "net/http"
+
+// Chain returns a Middlewares type from a slice of middleware handlers.
+func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
+ return Middlewares(middlewares)
+}
+
+// Handler builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) Handler(h http.Handler) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// ChainHandler is a http.Handler with support for handler composition and
+// execution.
+type ChainHandler struct {
+ Middlewares Middlewares
+ Endpoint http.Handler
+ chain http.Handler
+}
+
+func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ c.chain.ServeHTTP(w, r)
+}
+
+// chain builds a http.Handler composed of an inline middleware stack and endpoint
+// handler in the order they are passed.
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
+ // Return ahead of time if there aren't any middlewares for the chain
+ if len(middlewares) == 0 {
+ return endpoint
+ }
+
+ // Wrap the end handler with the middleware chain
+ h := middlewares[len(middlewares)-1](endpoint)
+ for i := len(middlewares) - 2; i >= 0; i-- {
+ h = middlewares[i](h)
+ }
+
+ return h
+}
diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go
new file mode 100644
index 0000000000..b7063dc297
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chi.go
@@ -0,0 +1,134 @@
+//
+// Package chi is a small, idiomatic and composable router for building HTTP services.
+//
+// chi requires Go 1.10 or newer.
+//
+// Example:
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/go-chi/chi"
+// "github.com/go-chi/chi/middleware"
+// )
+//
+// func main() {
+// r := chi.NewRouter()
+// r.Use(middleware.Logger)
+// r.Use(middleware.Recoverer)
+//
+// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("root."))
+// })
+//
+// http.ListenAndServe(":3333", r)
+// }
+//
+// See github.com/go-chi/chi/_examples/ for more in-depth examples.
+//
+// URL patterns allow for easy matching of path components in HTTP
+// requests. The matching components can then be accessed using
+// chi.URLParam(). All patterns must begin with a slash.
+//
+// A simple named placeholder {name} matches any sequence of characters
+// up to the next / or the end of the URL. Trailing slashes on paths must
+// be handled explicitly.
+//
+// A placeholder with a name followed by a colon allows a regular
+// expression match, for example {number:\\d+}. The regular expression
+// syntax is Go's normal regexp RE2 syntax, except that regular expressions
+// including { or } are not supported, and / will never be
+// matched. An anonymous regexp pattern is allowed, using an empty string
+// before the colon in the placeholder, such as {:\\d+}
+//
+// The special placeholder of asterisk matches the rest of the requested
+// URL. Any trailing characters in the pattern are ignored. This is the only
+// placeholder which will match / characters.
+//
+// Examples:
+// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
+// "/user/{name}/info" matches "/user/jsmith/info"
+// "/page/*" matches "/page/intro/latest"
+// "/page/*/index" also matches "/page/intro/latest"
+// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
+//
+package chi
+
+import "net/http"
+
+// NewRouter returns a new Mux object that implements the Router interface.
+func NewRouter() *Mux {
+ return NewMux()
+}
+
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+ // Handle and HandleFunc adds routes for `pattern` that matches
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+ // Method and MethodFunc adds routes for `pattern` that matches
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds two methods for router traversal, which is also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handler's.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go
new file mode 100644
index 0000000000..26c609ea2c
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/context.go
@@ -0,0 +1,172 @@
+package chi
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+)
+
+// URLParam returns the url parameter from a http.Request object.
+func URLParam(r *http.Request, key string) string {
+ if rctx := RouteContext(r.Context()); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// URLParamFromCtx returns the url parameter from a http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+ if rctx := RouteContext(ctx); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// RouteContext returns chi's routing Context object from a
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+ val, _ := ctx.Value(RouteCtxKey).(*Context)
+ return val
+}
+
+// ServerBaseContext wraps an http.Handler to set the request context to the
+// `baseCtx`.
+func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler {
+ fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ baseCtx := baseCtx
+
+ // Copy over default net/http server context keys
+ if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok {
+ baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v)
+ }
+ if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok {
+ baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v)
+ }
+
+ h.ServeHTTP(w, r.WithContext(baseCtx))
+ })
+ return fn
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+ return &Context{}
+}
+
+var (
+ // RouteCtxKey is the context.Context key to store the request context.
+ RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+ Routes Routes
+
+ // Routing path/method override used during the route search.
+ // See Mux#routeHTTP method.
+ RoutePath string
+ RouteMethod string
+
+ // Routing pattern stack throughout the lifecycle of the request,
+ // across all connected routers. It is a record of all matching
+ // patterns across a stack of sub-routers.
+ RoutePatterns []string
+
+ // URLParams are the stack of routeParams captured during the
+ // routing lifecycle across a stack of sub-routers.
+ URLParams RouteParams
+
+ // The endpoint routing pattern that matched the request URI path
+ // or `RoutePath` of the current sub-router. This value will update
+ // during the lifecycle of a request passing through a stack of
+ // sub-routers.
+ routePattern string
+
+ // Route parameters matched for the current sub-router. It is
+	// intentionally unexported so it can't be tampered with.
+ routeParams RouteParams
+
+ // methodNotAllowed hint
+ methodNotAllowed bool
+}
+
+// Reset a routing context to its initial state.
+func (x *Context) Reset() {
+ x.Routes = nil
+ x.RoutePath = ""
+ x.RouteMethod = ""
+ x.RoutePatterns = x.RoutePatterns[:0]
+ x.URLParams.Keys = x.URLParams.Keys[:0]
+ x.URLParams.Values = x.URLParams.Values[:0]
+
+ x.routePattern = ""
+ x.routeParams.Keys = x.routeParams.Keys[:0]
+ x.routeParams.Values = x.routeParams.Values[:0]
+ x.methodNotAllowed = false
+}
+
+// URLParam returns the corresponding URL parameter value from the request
+// routing context.
+func (x *Context) URLParam(key string) string {
+ for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
+ if x.URLParams.Keys[k] == key {
+ return x.URLParams.Values[k]
+ }
+ }
+ return ""
+}
+
+// RoutePattern builds the routing pattern string for the particular
+// request, at the particular point during routing. This means the value
+// will change throughout the execution of a request in a router. That is
+// why it's advised to only use this value after calling the next handler.
+//
+// For example,
+//
+// func Instrument(next http.Handler) http.Handler {
+// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// next.ServeHTTP(w, r)
+// routePattern := chi.RouteContext(r.Context()).RoutePattern()
+// measure(w, r, routePattern)
+// })
+// }
+func (x *Context) RoutePattern() string {
+ routePattern := strings.Join(x.RoutePatterns, "")
+ return replaceWildcards(routePattern)
+}
+
+// replaceWildcards takes a route pattern and recursively replaces all
+// occurrences of "/*/" to "/".
+func replaceWildcards(p string) string {
+ if strings.Contains(p, "/*/") {
+ return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
+ }
+
+ return p
+}
+
+// RouteParams is a structure to track URL routing parameters efficiently.
+type RouteParams struct {
+ Keys, Values []string
+}
+
+// Add appends a URL parameter to the end of the route params.
+func (s *RouteParams) Add(key, value string) {
+ s.Keys = append(s.Keys, key)
+ s.Values = append(s.Values, value)
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/middleware/basic_auth.go
new file mode 100644
index 0000000000..87b2641a6a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/basic_auth.go
@@ -0,0 +1,32 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// BasicAuth implements a simple middleware handler for adding basic http auth to a route.
+func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ user, pass, ok := r.BasicAuth()
+ if !ok {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ credPass, credUserOk := creds[user]
+ if !credUserOk || pass != credPass {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+func basicAuthFailed(w http.ResponseWriter, realm string) {
+ w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
+ w.WriteHeader(http.StatusUnauthorized)
+}
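
A hedged usage sketch for the middleware above; the realm and credentials are placeholders, and as the code shows, this vendored version compares plaintext passwords from the map:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()

	// Protect everything below with basic auth; replace the realm and map as needed.
	r.Use(middleware.BasicAuth("restricted", map[string]string{"admin": "secret"}))

	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authenticated"))
	})

	http.ListenAndServe(":3000", r)
}
```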
diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go
new file mode 100644
index 0000000000..2f40cc15af
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/compress.go
@@ -0,0 +1,399 @@
+package middleware
+
+import (
+ "bufio"
+ "compress/flate"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var defaultCompressibleContentTypes = []string{
+ "text/html",
+ "text/css",
+ "text/plain",
+ "text/javascript",
+ "application/javascript",
+ "application/x-javascript",
+ "application/json",
+ "application/atom+xml",
+ "application/rss+xml",
+ "image/svg+xml",
+}
+
+// Compress is a middleware that compresses response
+// body of a given content types to a data format based
+// on Accept-Encoding request header. It uses a given
+// compression level.
+//
+// NOTE: make sure to set the Content-Type header on your response
+// otherwise this middleware will not compress the response body. For example, in
+// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
+// or set it manually.
+//
+// Passing a compression level of 5 is a sensible value.
+func Compress(level int, types ...string) func(next http.Handler) http.Handler {
+ compressor := NewCompressor(level, types...)
+ return compressor.Handler
+}
+
+// Compressor represents a set of encoding configurations.
+type Compressor struct {
+ level int // The compression level.
+ // The mapping of encoder names to encoder functions.
+ encoders map[string]EncoderFunc
+ // The mapping of pooled encoders to pools.
+ pooledEncoders map[string]*sync.Pool
+ // The set of content types allowed to be compressed.
+ allowedTypes map[string]struct{}
+ allowedWildcards map[string]struct{}
+ // The list of encoders in order of decreasing precedence.
+ encodingPrecedence []string
+}
+
+// NewCompressor creates a new Compressor that will handle encoding responses.
+//
+// The level should be one of the ones defined in the flate package.
+// The types are the content types that are allowed to be compressed.
+func NewCompressor(level int, types ...string) *Compressor {
+ // If types are provided, set those as the allowed types. If none are
+ // provided, use the default list.
+ allowedTypes := make(map[string]struct{})
+ allowedWildcards := make(map[string]struct{})
+ if len(types) > 0 {
+ for _, t := range types {
+ if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
+ panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
+ }
+ if strings.HasSuffix(t, "/*") {
+ allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
+ } else {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+ } else {
+ for _, t := range defaultCompressibleContentTypes {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+
+ c := &Compressor{
+ level: level,
+ encoders: make(map[string]EncoderFunc),
+ pooledEncoders: make(map[string]*sync.Pool),
+ allowedTypes: allowedTypes,
+ allowedWildcards: allowedWildcards,
+ }
+
+ // Set the default encoders. The precedence order is the reverse of the
+ // order in which the encoders were added. This means newly added encoders
+ // will move to the front of the order.
+ //
+ // TODO:
+ // lzma: Opera.
+ // sdch: Chrome, Android. Gzip output + dictionary header.
+ // br: Brotli, see https://github.com/go-chi/chi/pull/326
+
+ // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
+ // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
+ // checksum compared to CRC-32 used in "gzip" and thus is faster.
+ //
+ // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
+ // raw DEFLATE data only, without the mentioned zlib wrapper.
+ // Because of this major confusion, most modern browsers try it
+ // both ways, first looking for zlib headers.
+ // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
+ //
+ // The list of browsers having problems is quite big, see:
+ // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
+ //
+ // That's why we prefer gzip over deflate. It's just more reliable
+ // and not significantly slower than deflate.
+ c.SetEncoder("deflate", encoderDeflate)
+
+ // TODO: Exception for old MSIE browsers that can't handle non-HTML?
+ // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ c.SetEncoder("gzip", encoderGzip)
+
+ // NOTE: Not implemented, intentionally:
+ // case "compress": // LZW. Deprecated.
+ // case "bzip2": // Too slow on-the-fly.
+ // case "zopfli": // Too slow on-the-fly.
+ // case "xz": // Too slow on-the-fly.
+ return c
+}
+
+// SetEncoder can be used to set the implementation of a compression algorithm.
+//
+// The encoding should be a standardised identifier. See:
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
+//
+// For example, add the Brotli algorithm:
+//
+// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
+//
+// compressor := middleware.NewCompressor(5, "text/html")
+// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
+// params := brotli_enc.NewBrotliParams()
+// params.SetQuality(level)
+// return brotli_enc.NewBrotliWriter(params, w)
+// })
+func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
+ encoding = strings.ToLower(encoding)
+ if encoding == "" {
+ panic("the encoding can not be empty")
+ }
+ if fn == nil {
+ panic("attempted to set a nil encoder function")
+ }
+
+ // If we are adding a new encoder that is already registered, we have to
+ // clear that one out first.
+ if _, ok := c.pooledEncoders[encoding]; ok {
+ delete(c.pooledEncoders, encoding)
+ }
+ if _, ok := c.encoders[encoding]; ok {
+ delete(c.encoders, encoding)
+ }
+
+ // If the encoder supports resetting (ioResetterWriter), then it can be pooled.
+ encoder := fn(ioutil.Discard, c.level)
+ if encoder != nil {
+ if _, ok := encoder.(ioResetterWriter); ok {
+ pool := &sync.Pool{
+ New: func() interface{} {
+ return fn(ioutil.Discard, c.level)
+ },
+ }
+ c.pooledEncoders[encoding] = pool
+ }
+ }
+ // If the encoder is not in the pooledEncoders, add it to the normal encoders.
+ if _, ok := c.pooledEncoders[encoding]; !ok {
+ c.encoders[encoding] = fn
+ }
+
+ for i, v := range c.encodingPrecedence {
+ if v == encoding {
+ c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
+ }
+ }
+
+ c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
+}
+
+// Handler returns a new middleware that will compress the response based on the
+// current Compressor.
+func (c *Compressor) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ encoder, encoding, cleanup := c.selectEncoder(r.Header, w)
+
+ cw := &compressResponseWriter{
+ ResponseWriter: w,
+ w: w,
+ contentTypes: c.allowedTypes,
+ contentWildcards: c.allowedWildcards,
+ encoding: encoding,
+ compressable: false, // determined in post-handler
+ }
+ if encoder != nil {
+ cw.w = encoder
+ }
+ // Re-add the encoder to the pool if applicable.
+ defer cleanup()
+ defer cw.Close()
+
+ next.ServeHTTP(cw, r)
+ })
+}
+
+// selectEncoder returns the encoder, the name of the encoder, and a cleanup function.
+func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
+ header := h.Get("Accept-Encoding")
+
+ // Parse the names of all accepted algorithms from the header.
+ accepted := strings.Split(strings.ToLower(header), ",")
+
+ // Find the first supported encoder from the accepted list, in precedence order
+ for _, name := range c.encodingPrecedence {
+ if matchAcceptEncoding(accepted, name) {
+ if pool, ok := c.pooledEncoders[name]; ok {
+ encoder := pool.Get().(ioResetterWriter)
+ cleanup := func() {
+ pool.Put(encoder)
+ }
+ encoder.Reset(w)
+ return encoder, name, cleanup
+
+ }
+ if fn, ok := c.encoders[name]; ok {
+ return fn(w, c.level), name, func() {}
+ }
+ }
+
+ }
+
+ // No encoder found to match the accepted encoding
+ return nil, "", func() {}
+}
+
+func matchAcceptEncoding(accepted []string, encoding string) bool {
+ for _, v := range accepted {
+ if strings.Contains(v, encoding) {
+ return true
+ }
+ }
+ return false
+}
+
+// An EncoderFunc is a function that wraps the provided io.Writer with a
+// streaming compression algorithm and returns it.
+//
+// In case of failure, the function should return nil.
+type EncoderFunc func(w io.Writer, level int) io.Writer
+
+// Interface for types that allow resetting io.Writers.
+type ioResetterWriter interface {
+ io.Writer
+ Reset(w io.Writer)
+}
+
+type compressResponseWriter struct {
+ http.ResponseWriter
+
+ // The streaming encoder writer to be used if there is one. Otherwise,
+ // this is just the normal writer.
+ w io.Writer
+ encoding string
+ contentTypes map[string]struct{}
+ contentWildcards map[string]struct{}
+ wroteHeader bool
+ compressable bool
+}
+
+func (cw *compressResponseWriter) isCompressable() bool {
+ // Parse the first part of the Content-Type response header.
+ contentType := cw.Header().Get("Content-Type")
+ if idx := strings.Index(contentType, ";"); idx >= 0 {
+ contentType = contentType[0:idx]
+ }
+
+ // Is the content type compressable?
+ if _, ok := cw.contentTypes[contentType]; ok {
+ return true
+ }
+ if idx := strings.Index(contentType, "/"); idx > 0 {
+ contentType = contentType[0:idx]
+ _, ok := cw.contentWildcards[contentType]
+ return ok
+ }
+ return false
+}
+
+func (cw *compressResponseWriter) WriteHeader(code int) {
+ if cw.wroteHeader {
+ cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
+ return
+ }
+ cw.wroteHeader = true
+ defer cw.ResponseWriter.WriteHeader(code)
+
+ // Already compressed data?
+ if cw.Header().Get("Content-Encoding") != "" {
+ return
+ }
+
+ if !cw.isCompressable() {
+ cw.compressable = false
+ return
+ }
+
+ if cw.encoding != "" {
+ cw.compressable = true
+ cw.Header().Set("Content-Encoding", cw.encoding)
+ cw.Header().Set("Vary", "Accept-Encoding")
+
+ // The content-length after compression is unknown
+ cw.Header().Del("Content-Length")
+ }
+}
+
+func (cw *compressResponseWriter) Write(p []byte) (int, error) {
+ if !cw.wroteHeader {
+ cw.WriteHeader(http.StatusOK)
+ }
+
+ return cw.writer().Write(p)
+}
+
+func (cw *compressResponseWriter) writer() io.Writer {
+ if cw.compressable {
+ return cw.w
+ } else {
+ return cw.ResponseWriter
+ }
+}
+
+type compressFlusher interface {
+ Flush() error
+}
+
+func (cw *compressResponseWriter) Flush() {
+ if f, ok := cw.writer().(http.Flusher); ok {
+ f.Flush()
+ }
+ // If the underlying writer has a compression flush signature,
+ // call this Flush() method instead
+ if f, ok := cw.writer().(compressFlusher); ok {
+ f.Flush()
+
+ // Also flush the underlying response writer
+ if f, ok := cw.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+ }
+}
+
+func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := cw.writer().(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
+ if ps, ok := cw.writer().(http.Pusher); ok {
+ return ps.Push(target, opts)
+ }
+ return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Close() error {
+ if c, ok := cw.writer().(io.WriteCloser); ok {
+ return c.Close()
+ }
+ return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
+}
+
+func encoderGzip(w io.Writer, level int) io.Writer {
+ gw, err := gzip.NewWriterLevel(w, level)
+ if err != nil {
+ return nil
+ }
+ return gw
+}
+
+func encoderDeflate(w io.Writer, level int) io.Writer {
+ dw, err := flate.NewWriter(w, level)
+ if err != nil {
+ return nil
+ }
+ return dw
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go
new file mode 100644
index 0000000000..07b5ce6f66
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_charset.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
+// An empty charset will allow requests with no Content-Type header or no specified charset.
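+//
+// For illustration, a sketch allowing UTF-8 or an unspecified charset (the
+// router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.ContentCharset("UTF-8", ""))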
+func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
+ for i, c := range charsets {
+ charsets[i] = strings.ToLower(c)
+ }
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+// Check the content encoding against a list of acceptable values.
+func contentEncoding(ce string, charsets ...string) bool {
+ _, ce = split(strings.ToLower(ce), ";")
+ _, ce = split(ce, "charset=")
+ ce, _ = split(ce, ";")
+ for _, c := range charsets {
+ if ce == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Split a string into two parts, trimming any surrounding whitespace.
+func split(str, sep string) (string, string) {
+ var a, b string
+ var parts = strings.SplitN(str, sep, 2)
+ a = strings.TrimSpace(parts[0])
+ if len(parts) == 2 {
+ b = strings.TrimSpace(parts[1])
+ }
+
+ return a, b
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/middleware/content_encoding.go
new file mode 100644
index 0000000000..e0b9ccc08a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_encoding.go
@@ -0,0 +1,34 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// AllowContentEncoding enforces a whitelist of request Content-Encoding values, otherwise
+// responding with a 415 Unsupported Media Type status.
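+//
+// For illustration, a minimal usage sketch; the router r and the allowed
+// encodings are placeholders:
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.AllowContentEncoding("gzip", "deflate"))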
+func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler {
+ allowedEncodings := make(map[string]struct{}, len(contentEncoding))
+ for _, encoding := range contentEncoding {
+ allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{}
+ }
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ requestEncodings := r.Header["Content-Encoding"]
+ // skip check for empty content body or no Content-Encoding
+ if r.ContentLength == 0 {
+ next.ServeHTTP(w, r)
+ return
+ }
+ // All encodings in the request must be allowed
+ for _, encoding := range requestEncodings {
+ if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go
new file mode 100644
index 0000000000..ee4957874f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_type.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SetHeader is a convenience handler to set a response header key/value
+func SetHeader(key, value string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(key, value)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// AllowContentType enforces a whitelist of request Content-Types, otherwise
+// responding with a 415 Unsupported Media Type status.
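+//
+// For illustration, a sketch restricting requests to JSON and XML bodies (the
+// router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.AllowContentType("application/json", "text/xml"))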
+func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
+ cT := []string{}
+ for _, t := range contentTypes {
+ cT = append(cT, strings.ToLower(t))
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.ContentLength == 0 {
+ // skip check for empty content body
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
+ if i := strings.Index(s, ";"); i > -1 {
+ s = s[0:i]
+ }
+
+ for _, t := range cT {
+ if t == s {
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go
new file mode 100644
index 0000000000..86068a96db
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/get_head.go
@@ -0,0 +1,39 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// GetHead automatically routes undefined HEAD requests to GET handlers.
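+//
+// For illustration, a minimal sketch (the router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.GetHead)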
+func GetHead(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "HEAD" {
+ rctx := chi.RouteContext(r.Context())
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Temporary routing context to look-ahead before routing the request
+ tctx := chi.NewRouteContext()
+
+ // Attempt to find a HEAD handler for the routing path; if not found, traverse
+ // the router as though it's a GET route, but proceed with the request
+ // using the HEAD method.
+ if !rctx.Routes.Match(tctx, "HEAD", routePath) {
+ rctx.RouteMethod = "GET"
+ rctx.RoutePath = routePath
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
new file mode 100644
index 0000000000..fe822fb536
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
@@ -0,0 +1,26 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Heartbeat is endpoint middleware useful for setting up a path like
+// `/ping` that load balancers or external uptime-testing services
+// can request before hitting any routes. It's also convenient
+// to place this above ACL middlewares.
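+//
+// For illustration, a minimal sketch (the router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.Heartbeat("/ping"))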
+func Heartbeat(endpoint string) func(http.Handler) http.Handler {
+ f := func(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("."))
+ return
+ }
+ h.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+ return f
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go
new file mode 100644
index 0000000000..158a6a3905
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/logger.go
@@ -0,0 +1,155 @@
+package middleware
+
+import (
+ "bytes"
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "time"
+)
+
+var (
+ // LogEntryCtxKey is the context.Context key to store the request log entry.
+ LogEntryCtxKey = &contextKey{"LogEntry"}
+
+ // DefaultLogger is called by the Logger middleware handler to log each request.
+ // It's made a package-level variable so that it can be reconfigured for custom
+ // logging configurations.
+ DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
+)
+
+// Logger is a middleware that logs the start and end of each request, along
+// with some useful data about what was requested, what the response status was,
+// and how long it took to return. When standard output is a TTY, Logger will
+// print in color, otherwise it will print in black and white. Logger prints a
+// request ID if one is provided.
+//
+// Alternatively, look at https://github.com/goware/httplog for a more in-depth
+// http logger with structured logging support.
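+//
+// For illustration, a minimal usage sketch; the router r is a placeholder,
+// and Logger is typically mounted before Recoverer so panics get logged too:
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.Logger)
+//  r.Use(middleware.Recoverer)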
+func Logger(next http.Handler) http.Handler {
+ return DefaultLogger(next)
+}
+
+// RequestLogger returns a logger handler using a custom LogFormatter.
+func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ entry := f.NewLogEntry(r)
+ ww := NewWrapResponseWriter(w, r.ProtoMajor)
+
+ t1 := time.Now()
+ defer func() {
+ entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
+ }()
+
+ next.ServeHTTP(ww, WithLogEntry(r, entry))
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// LogFormatter initiates the beginning of a new LogEntry per request.
+// See DefaultLogFormatter for an example implementation.
+type LogFormatter interface {
+ NewLogEntry(r *http.Request) LogEntry
+}
+
+// LogEntry records the final log when a request completes.
+// See defaultLogEntry for an example implementation.
+type LogEntry interface {
+ Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
+ Panic(v interface{}, stack []byte)
+}
+
+// GetLogEntry returns the in-context LogEntry for a request.
+func GetLogEntry(r *http.Request) LogEntry {
+ entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
+ return entry
+}
+
+// WithLogEntry sets the in-context LogEntry for a request.
+func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
+ r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
+ return r
+}
+
+// LoggerInterface accepts printing to the stdlib logger or any compatible logger.
+type LoggerInterface interface {
+ Print(v ...interface{})
+}
+
+// DefaultLogFormatter is a simple logger that implements a LogFormatter.
+type DefaultLogFormatter struct {
+ Logger LoggerInterface
+ NoColor bool
+}
+
+// NewLogEntry creates a new LogEntry for the request.
+func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
+ useColor := !l.NoColor
+ entry := &defaultLogEntry{
+ DefaultLogFormatter: l,
+ request: r,
+ buf: &bytes.Buffer{},
+ useColor: useColor,
+ }
+
+ reqID := GetReqID(r.Context())
+ if reqID != "" {
+ cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
+ }
+ cW(entry.buf, useColor, nCyan, "\"")
+ cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
+
+ scheme := "http"
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
+
+ entry.buf.WriteString("from ")
+ entry.buf.WriteString(r.RemoteAddr)
+ entry.buf.WriteString(" - ")
+
+ return entry
+}
+
+type defaultLogEntry struct {
+ *DefaultLogFormatter
+ request *http.Request
+ buf *bytes.Buffer
+ useColor bool
+}
+
+func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
+ switch {
+ case status < 200:
+ cW(l.buf, l.useColor, bBlue, "%03d", status)
+ case status < 300:
+ cW(l.buf, l.useColor, bGreen, "%03d", status)
+ case status < 400:
+ cW(l.buf, l.useColor, bCyan, "%03d", status)
+ case status < 500:
+ cW(l.buf, l.useColor, bYellow, "%03d", status)
+ default:
+ cW(l.buf, l.useColor, bRed, "%03d", status)
+ }
+
+ cW(l.buf, l.useColor, bBlue, " %dB", bytes)
+
+ l.buf.WriteString(" in ")
+ if elapsed < 500*time.Millisecond {
+ cW(l.buf, l.useColor, nGreen, "%s", elapsed)
+ } else if elapsed < 5*time.Second {
+ cW(l.buf, l.useColor, nYellow, "%s", elapsed)
+ } else {
+ cW(l.buf, l.useColor, nRed, "%s", elapsed)
+ }
+
+ l.Logger.Print(l.buf.String())
+}
+
+func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
+ PrintPrettyStack(v)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go
new file mode 100644
index 0000000000..cc371e00a8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/middleware.go
@@ -0,0 +1,23 @@
+package middleware
+
+import "net/http"
+
+// New will create a new middleware handler from a http.Handler.
+func New(h http.Handler) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.ServeHTTP(w, r)
+ })
+ }
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi/middleware context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go
new file mode 100644
index 0000000000..2412829e1b
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/nocache.go
@@ -0,0 +1,58 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "time"
+)
+
+// Unix epoch time
+var epoch = time.Unix(0, 0).Format(time.RFC1123)
+
+// Taken from https://github.com/mytrile/nocache
+var noCacheHeaders = map[string]string{
+ "Expires": epoch,
+ "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
+ "Pragma": "no-cache",
+ "X-Accel-Expires": "0",
+}
+
+var etagHeaders = []string{
+ "ETag",
+ "If-Modified-Since",
+ "If-Match",
+ "If-None-Match",
+ "If-Range",
+ "If-Unmodified-Since",
+}
+
+// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
+// a router (or subrouter) from being cached by an upstream proxy and/or client.
+//
+// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
+// Expires: Thu, 01 Jan 1970 00:00:00 UTC
+// Cache-Control: no-cache, private, max-age=0
+// X-Accel-Expires: 0
+// Pragma: no-cache (for HTTP/1.0 proxies/clients)
+func NoCache(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+
+ // Delete any ETag headers that may have been set
+ for _, v := range etagHeaders {
+ if r.Header.Get(v) != "" {
+ r.Header.Del(v)
+ }
+ }
+
+ // Set our NoCache headers
+ for k, v := range noCacheHeaders {
+ w.Header().Set(k, v)
+ }
+
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go
new file mode 100644
index 0000000000..1d44b8259a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/profiler.go
@@ -0,0 +1,55 @@
+package middleware
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+
+ "github.com/go-chi/chi"
+)
+
+// Profiler is a convenient subrouter used for mounting net/http/pprof. ie.
+//
+// func MyService() http.Handler {
+// r := chi.NewRouter()
+// // ..middlewares
+// r.Mount("/debug", middleware.Profiler())
+// // ..routes
+// return r
+// }
+func Profiler() http.Handler {
+ r := chi.NewRouter()
+ r.Use(NoCache)
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
+ })
+ r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/", 301)
+ })
+
+ r.HandleFunc("/pprof/*", pprof.Index)
+ r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
+ r.HandleFunc("/pprof/profile", pprof.Profile)
+ r.HandleFunc("/pprof/symbol", pprof.Symbol)
+ r.HandleFunc("/pprof/trace", pprof.Trace)
+ r.HandleFunc("/vars", expVars)
+
+ return r
+}
+
+// Replicated from expvar.go as not public.
+func expVars(w http.ResponseWriter, r *http.Request) {
+ first := true
+ w.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(w, "{\n")
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go
new file mode 100644
index 0000000000..72db6ca9f5
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/realip.go
@@ -0,0 +1,54 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+
+// RealIP is a middleware that sets a http.Request's RemoteAddr to the results
+// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that
+// order).
+//
+// This middleware should be inserted fairly early in the middleware stack to
+// ensure that subsequent layers (e.g., request loggers) which examine the
+// RemoteAddr will see the intended value.
+//
+// You should only use this middleware if you can trust the headers passed to
+// you (in particular, the two headers this middleware uses), for example
+// because you have placed a reverse proxy like HAProxy or nginx in front of
+// chi. If your reverse proxies are configured to pass along arbitrary header
+// values from the client, or if you use this middleware without a reverse
+// proxy, malicious clients will be able to make you very sad (or, depending on
+// how you're using RemoteAddr, vulnerable to an attack of some sort).
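+//
+// For illustration, a minimal sketch mounting it near the top of the stack
+// (the router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.RealIP)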
+func RealIP(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if rip := realIP(r); rip != "" {
+ r.RemoteAddr = rip
+ }
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func realIP(r *http.Request) string {
+ var ip string
+
+ if xrip := r.Header.Get(xRealIP); xrip != "" {
+ ip = xrip
+ } else if xff := r.Header.Get(xForwardedFor); xff != "" {
+ i := strings.Index(xff, ", ")
+ if i == -1 {
+ i = len(xff)
+ }
+ ip = xff[:i]
+ }
+
+ return ip
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go
new file mode 100644
index 0000000000..785b18c52b
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/recoverer.go
@@ -0,0 +1,192 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "runtime/debug"
+ "strings"
+)
+
+// Recoverer is a middleware that recovers from panics, logs the panic (and a
+// backtrace), and returns an HTTP 500 (Internal Server Error) status if
+// possible. Recoverer prints a request ID if one is provided.
+//
+// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
+func Recoverer(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler {
+
+ logEntry := GetLogEntry(r)
+ if logEntry != nil {
+ logEntry.Panic(rvr, debug.Stack())
+ } else {
+ PrintPrettyStack(rvr)
+ }
+
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ }()
+
+ next.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func PrintPrettyStack(rvr interface{}) {
+ debugStack := debug.Stack()
+ s := prettyStack{}
+ out, err := s.parse(debugStack, rvr)
+ if err == nil {
+ os.Stderr.Write(out)
+ } else {
+ // print stdlib output as a fallback
+ os.Stderr.Write(debugStack)
+ }
+}
+
+type prettyStack struct {
+}
+
+func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
+ var err error
+ useColor := true
+ buf := &bytes.Buffer{}
+
+ cW(buf, false, bRed, "\n")
+ cW(buf, useColor, bCyan, " panic: ")
+ cW(buf, useColor, bBlue, "%v", rvr)
+ cW(buf, false, bWhite, "\n \n")
+
+ // process debug stack info
+ stack := strings.Split(string(debugStack), "\n")
+ lines := []string{}
+
+ // locate panic line, as we may have nested panics
+ for i := len(stack) - 1; i > 0; i-- {
+ lines = append(lines, stack[i])
+ if strings.HasPrefix(stack[i], "panic(0x") {
+ lines = lines[0 : len(lines)-2] // remove boilerplate
+ break
+ }
+ }
+
+ // reverse
+ for i := len(lines)/2 - 1; i >= 0; i-- {
+ opp := len(lines) - 1 - i
+ lines[i], lines[opp] = lines[opp], lines[i]
+ }
+
+ // decorate
+ for i, line := range lines {
+ lines[i], err = s.decorateLine(line, useColor, i)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for _, l := range lines {
+ fmt.Fprintf(buf, "%s", l)
+ }
+ return buf.Bytes(), nil
+}
+
+func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) {
+ line = strings.TrimSpace(line)
+ if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") {
+ return s.decorateSourceLine(line, useColor, num)
+ } else if strings.HasSuffix(line, ")") {
+ return s.decorateFuncCallLine(line, useColor, num)
+ } else {
+ if strings.HasPrefix(line, "\t") {
+ return strings.Replace(line, "\t", " ", 1), nil
+ } else {
+ return fmt.Sprintf(" %s\n", line), nil
+ }
+ }
+}
+
+func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, "(")
+ if idx < 0 {
+ return "", errors.New("not a func call line")
+ }
+
+ buf := &bytes.Buffer{}
+ pkg := line[0:idx]
+ // addr := line[idx:]
+ method := ""
+
+ idx = strings.LastIndex(pkg, string(os.PathSeparator))
+ if idx < 0 {
+ idx = strings.Index(pkg, ".")
+ method = pkg[idx:]
+ pkg = pkg[0:idx]
+ } else {
+ method = pkg[idx+1:]
+ pkg = pkg[0 : idx+1]
+ idx = strings.Index(method, ".")
+ pkg += method[0:idx]
+ method = method[idx:]
+ }
+ pkgColor := nYellow
+ methodColor := bGreen
+
+ if num == 0 {
+ cW(buf, useColor, bRed, " -> ")
+ pkgColor = bMagenta
+ methodColor = bRed
+ } else {
+ cW(buf, useColor, bWhite, " ")
+ }
+ cW(buf, useColor, pkgColor, "%s", pkg)
+ cW(buf, useColor, methodColor, "%s\n", method)
+ // cW(buf, useColor, nBlack, "%s", addr)
+ return buf.String(), nil
+}
+
+func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, ".go:")
+ if idx < 0 {
+ return "", errors.New("not a source line")
+ }
+
+ buf := &bytes.Buffer{}
+ path := line[0 : idx+3]
+ lineno := line[idx+3:]
+
+ idx = strings.LastIndex(path, string(os.PathSeparator))
+ dir := path[0 : idx+1]
+ file := path[idx+1:]
+
+ idx = strings.Index(lineno, " ")
+ if idx > 0 {
+ lineno = lineno[0:idx]
+ }
+ fileColor := bCyan
+ lineColor := bGreen
+
+ if num == 1 {
+ cW(buf, useColor, bRed, " -> ")
+ fileColor = bRed
+ lineColor = bMagenta
+ } else {
+ cW(buf, false, bWhite, " ")
+ }
+ cW(buf, useColor, bWhite, "%s", dir)
+ cW(buf, useColor, fileColor, "%s", file)
+ cW(buf, useColor, lineColor, "%s", lineno)
+ if num == 1 {
+ cW(buf, false, bWhite, "\n")
+ }
+ cW(buf, false, bWhite, "\n")
+
+ return buf.String(), nil
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go
new file mode 100644
index 0000000000..4903ecc214
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/request_id.go
@@ -0,0 +1,96 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync/atomic"
+)
+
+// Key to use when setting the request ID.
+type ctxKeyRequestID int
+
+// RequestIDKey is the key that holds the unique request ID in a request context.
+const RequestIDKey ctxKeyRequestID = 0
+
+// RequestIDHeader is the name of the HTTP Header which contains the request id.
+// Exported so that it can be changed by developers.
+var RequestIDHeader = "X-Request-Id"
+
+var prefix string
+var reqid uint64
+
+// A quick note on the statistics here: we're trying to calculate the chance that
+// two randomly generated base62 prefixes will collide. We use the formula from
+// http://en.wikipedia.org/wiki/Birthday_problem
+//
+// P[m, n] \approx 1 - e^{-m^2/2n}
+//
+// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
+// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
+//
+// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
+//
+// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
+// our purposes, and is surely more than anyone would ever need in practice -- a
+// process that is rebooted a handful of times a day for a hundred years has less
+// than a millionth of a percent chance of generating two colliding IDs.
+
+func init() {
+ hostname, err := os.Hostname()
+ if hostname == "" || err != nil {
+ hostname = "localhost"
+ }
+ var buf [12]byte
+ var b64 string
+ for len(b64) < 10 {
+ rand.Read(buf[:])
+ b64 = base64.StdEncoding.EncodeToString(buf[:])
+ b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
+ }
+
+ prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
+}
+
+// RequestID is a middleware that injects a request ID into the context of each
+// request. A request ID is a string of the form "host.example.com/random-0001",
+// where "random" is a base62 random string that uniquely identifies this go
+// process, and where the last number is an atomically incremented request
+// counter.
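+//
+// For illustration, a minimal usage sketch; the router r and the handler are
+// placeholders, and GetReqID reads the ID back out of the request context:
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.RequestID)
+//  r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+//    w.Write([]byte(middleware.GetReqID(r.Context())))
+//  })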
+func RequestID(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ requestID := r.Header.Get(RequestIDHeader)
+ if requestID == "" {
+ myid := atomic.AddUint64(&reqid, 1)
+ requestID = fmt.Sprintf("%s-%06d", prefix, myid)
+ }
+ ctx = context.WithValue(ctx, RequestIDKey, requestID)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ }
+ return http.HandlerFunc(fn)
+}
+
+// GetReqID returns a request ID from the given context if one is present.
+// Returns the empty string if a request ID cannot be found.
+func GetReqID(ctx context.Context) string {
+ if ctx == nil {
+ return ""
+ }
+ if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
+ return reqID
+ }
+ return ""
+}
+
+// NextRequestID generates the next request ID in the sequence.
+func NextRequestID() uint64 {
+ return atomic.AddUint64(&reqid, 1)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/route_headers.go b/vendor/github.com/go-chi/chi/middleware/route_headers.go
new file mode 100644
index 0000000000..7ee30c8773
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/route_headers.go
@@ -0,0 +1,160 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// RouteHeaders is a neat little header-based router that allows you to direct
+// the flow of a request through a middleware stack based on a request header.
+//
+// For example, let's say you'd like to set up multiple routers depending on the
+// request Host header; you could then do something like so:
+//
+// r := chi.NewRouter()
+// rSubdomain := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Host", "example.com", middleware.New(r)).
+// Route("Host", "*.example.com", middleware.New(rSubdomain)).
+// Handler)
+//
+// r.Get("/", h)
+// rSubdomain.Get("/", h2)
+//
+//
+// Another example: imagine you want to set up multiple CORS handlers, where for
+// your origin servers you allow authorized requests, but for third-party public
+// requests, authorization is disabled.
+//
+// r := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"https://api.skyweaver.net"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
+// AllowCredentials: true, // <----------<<< allow credentials
+// })).
+// Route("Origin", "*", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"*"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Content-Type"},
+// AllowCredentials: false, // <----------<<< do not allow credentials
+// })).
+// Handler)
+//
+func RouteHeaders() HeaderRouter {
+ return HeaderRouter{}
+}
+
+type HeaderRouter map[string][]HeaderRoute
+
+func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ patterns := []Pattern{}
+ for _, m := range match {
+ patterns = append(patterns, NewPattern(m))
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter {
+ hr["*"] = []HeaderRoute{{Middleware: handler}}
+ return hr
+}
+
+func (hr HeaderRouter) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if len(hr) == 0 {
+ // skip if no routes set
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ // find first matching header route, and continue
+ for header, matchers := range hr {
+ headerValue := r.Header.Get(header)
+ if headerValue == "" {
+ continue
+ }
+ headerValue = strings.ToLower(headerValue)
+ for _, matcher := range matchers {
+ if matcher.IsMatch(headerValue) {
+ matcher.Middleware(next).ServeHTTP(w, r)
+ return
+ }
+ }
+ }
+
+ // if no match, check for "*" default route
+ matcher, ok := hr["*"]
+ if !ok || matcher[0].Middleware == nil {
+ next.ServeHTTP(w, r)
+ return
+ }
+ matcher[0].Middleware(next).ServeHTTP(w, r)
+ })
+}
+
+type HeaderRoute struct {
+ MatchAny []Pattern
+ MatchOne Pattern
+ Middleware func(next http.Handler) http.Handler
+}
+
+func (r HeaderRoute) IsMatch(value string) bool {
+ if len(r.MatchAny) > 0 {
+ for _, m := range r.MatchAny {
+ if m.Match(value) {
+ return true
+ }
+ }
+ } else if r.MatchOne.Match(value) {
+ return true
+ }
+ return false
+}
+
+type Pattern struct {
+ prefix string
+ suffix string
+ wildcard bool
+}
+
+func NewPattern(value string) Pattern {
+ p := Pattern{}
+ if i := strings.IndexByte(value, '*'); i >= 0 {
+ p.wildcard = true
+ p.prefix = value[0:i]
+ p.suffix = value[i+1:]
+ } else {
+ p.prefix = value
+ }
+ return p
+}
+
+func (p Pattern) Match(v string) bool {
+ if !p.wildcard {
+ if p.prefix == v {
+ return true
+ } else {
+ return false
+ }
+ }
+ return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go
new file mode 100644
index 0000000000..2b8b1842ab
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/strip.go
@@ -0,0 +1,56 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// StripSlashes is a middleware that will match request paths with a trailing
+// slash, strip it from the path and continue routing through the mux; if a route
+// matches, it will then serve the handler.
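+//
+// For illustration, a minimal sketch (the router r is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.StripSlashes)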
+func StripSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ rctx.RoutePath = path[:len(path)-1]
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
+
+// RedirectSlashes is a middleware that will match request paths with a trailing
+// slash and redirect to the same path, less the trailing slash.
+//
+// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
+// see https://github.com/go-chi/chi/issues/343
+func RedirectSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ if r.URL.RawQuery != "" {
+ path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
+ } else {
+ path = path[:len(path)-1]
+ }
+ http.Redirect(w, r, path, 301)
+ return
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go
new file mode 100644
index 0000000000..5ead7b9243
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/terminal.go
@@ -0,0 +1,63 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ // Normal colors
+ nBlack = []byte{'\033', '[', '3', '0', 'm'}
+ nRed = []byte{'\033', '[', '3', '1', 'm'}
+ nGreen = []byte{'\033', '[', '3', '2', 'm'}
+ nYellow = []byte{'\033', '[', '3', '3', 'm'}
+ nBlue = []byte{'\033', '[', '3', '4', 'm'}
+ nMagenta = []byte{'\033', '[', '3', '5', 'm'}
+ nCyan = []byte{'\033', '[', '3', '6', 'm'}
+ nWhite = []byte{'\033', '[', '3', '7', 'm'}
+ // Bright colors
+ bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
+ bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
+ bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
+ bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
+ bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
+ bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
+ bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
+ bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
+
+ reset = []byte{'\033', '[', '0', 'm'}
+)
+
+var IsTTY bool
+
+func init() {
+ // This is sort of cheating: if stdout is a character device, we assume
+ // that means it's a TTY. Unfortunately, there are many non-TTY
+ // character devices, but fortunately stdout is rarely set to any of
+ // them.
+ //
+ // We could solve this properly by pulling in a dependency on
+ // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
+ // heuristic for whether to print in color or in black-and-white, I'd
+ // really rather not.
+ fi, err := os.Stdout.Stat()
+ if err == nil {
+ m := os.ModeDevice | os.ModeCharDevice
+ IsTTY = fi.Mode()&m == m
+ }
+}
+
+// colorWrite
+func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
+ if IsTTY && useColor {
+ w.Write(color)
+ }
+ fmt.Fprintf(w, s, args...)
+ if IsTTY && useColor {
+ w.Write(reset)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go
new file mode 100644
index 0000000000..fdedd3c127
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/throttle.go
@@ -0,0 +1,132 @@
+package middleware
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+)
+
+const (
+ errCapacityExceeded = "Server capacity exceeded."
+ errTimedOut = "Timed out while waiting for a pending request to complete."
+ errContextCanceled = "Context was canceled."
+)
+
+var (
+ defaultBacklogTimeout = time.Second * 60
+)
+
+// ThrottleOpts represents a set of throttling options.
+type ThrottleOpts struct {
+ Limit int
+ BacklogLimit int
+ BacklogTimeout time.Duration
+ RetryAfterFn func(ctxDone bool) time.Duration
+}
+
+// Throttle is a middleware that limits the number of currently processed requests
+// at a time across all users. Note: Throttle is not a per-user rate limiter;
+// instead, it just puts a ceiling on the number of currently in-flight requests
+// being processed from the point where the Throttle middleware is mounted.
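+//
+// For illustration, a sketch capping in-flight requests at 100 (the router r
+// is a placeholder):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.Throttle(100))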
+func Throttle(limit int) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
+}
+
+// ThrottleBacklog is a middleware that limits number of currently processed
+// requests at a time and provides a backlog for holding a finite number of
+// pending requests.
+func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
+}
+
+// ThrottleWithOpts is a middleware that limits number of currently processed requests using passed ThrottleOpts.
+func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
+ if opts.Limit < 1 {
+ panic("chi/middleware: Throttle expects limit > 0")
+ }
+
+ if opts.BacklogLimit < 0 {
+ panic("chi/middleware: Throttle expects backlogLimit to be positive")
+ }
+
+ t := throttler{
+ tokens: make(chan token, opts.Limit),
+ backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit),
+ backlogTimeout: opts.BacklogTimeout,
+ retryAfterFn: opts.RetryAfterFn,
+ }
+
+ // Filling tokens.
+ for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
+ if i < opts.Limit {
+ t.tokens <- token{}
+ }
+ t.backlogTokens <- token{}
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ select {
+
+ case <-ctx.Done():
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+
+ case btok := <-t.backlogTokens:
+ timer := time.NewTimer(t.backlogTimeout)
+
+ defer func() {
+ t.backlogTokens <- btok
+ }()
+
+ select {
+ case <-timer.C:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errTimedOut, http.StatusServiceUnavailable)
+ return
+ case <-ctx.Done():
+ timer.Stop()
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+ case tok := <-t.tokens:
+ defer func() {
+ timer.Stop()
+ t.tokens <- tok
+ }()
+ next.ServeHTTP(w, r)
+ }
+ return
+
+ default:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
+ return
+ }
+ }
+
+ return http.HandlerFunc(fn)
+ }
+}
+
+// token represents a request that is being processed.
+type token struct{}
+
+// throttler limits number of currently processed requests at a time.
+type throttler struct {
+ tokens chan token
+ backlogTokens chan token
+ backlogTimeout time.Duration
+ retryAfterFn func(ctxDone bool) time.Duration
+}
+
+// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
+func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
+ if t.retryAfterFn == nil {
+ return
+ }
+ w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go
new file mode 100644
index 0000000000..8e373536cf
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/timeout.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+// Timeout is a middleware that cancels ctx after a given timeout and returns
+// a 504 Gateway Timeout error to the client.
+//
+// It's required that you select on the ctx.Done() channel to check whether
+// the context has reached its deadline and return; otherwise the timeout
+// signal will just be ignored.
+//
+// ie. a route/handler may look like:
+//
+// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
+// ctx := r.Context()
+// processTime := time.Duration(rand.Intn(4)+1) * time.Second
+//
+// select {
+// case <-ctx.Done():
+// return
+//
+// case <-time.After(processTime):
+// // The above channel simulates some hard work.
+// }
+//
+// w.Write([]byte("done"))
+// })
+//
+func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), timeout)
+ defer func() {
+ cancel()
+ if ctx.Err() == context.DeadlineExceeded {
+ w.WriteHeader(http.StatusGatewayTimeout)
+ }
+ }()
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go
new file mode 100644
index 0000000000..5749e4f32b
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/url_format.go
@@ -0,0 +1,72 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/go-chi/chi"
+)
+
+var (
+ // URLFormatCtxKey is the context.Context key to store the URL format data
+ // for a request.
+ URLFormatCtxKey = &contextKey{"URLFormat"}
+)
+
+// URLFormat is a middleware that parses the url extension from a request path and stores it
+// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
+// trim the suffix from the routing path and continue routing.
+//
+// Routers should not include a url parameter for the suffix when using this middleware.
+//
+// Sample usage.. for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml`
+//
+// func routes() http.Handler {
+// r := chi.NewRouter()
+// r.Use(middleware.URLFormat)
+//
+// r.Get("/articles/{id}", ListArticles)
+//
+// return r
+// }
+//
+// func ListArticles(w http.ResponseWriter, r *http.Request) {
+// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
+//
+// switch urlFormat {
+// case "json":
+// render.JSON(w, r, articles)
+// case "xml":
+// render.XML(w, r, articles)
+// default:
+// render.JSON(w, r, articles)
+// }
+// }
+//
+func URLFormat(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var format string
+ path := r.URL.Path
+
+ if strings.Index(path, ".") > 0 {
+ base := strings.LastIndex(path, "/")
+ idx := strings.Index(path[base:], ".")
+
+ if idx > 0 {
+ idx += base
+ format = path[idx+1:]
+
+ rctx := chi.RouteContext(r.Context())
+ rctx.RoutePath = path[:idx]
+ }
+ }
+
+ r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
+
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go
new file mode 100644
index 0000000000..fbbd0393fb
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/value.go
@@ -0,0 +1,17 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+)
+
+// WithValue is a middleware that sets a given key/value in a context chain.
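+//
+// For illustration, a minimal sketch; the router r, the key, and the value are
+// placeholders (a custom key type is preferable to a plain string in practice):
+//
+//  r := chi.NewRouter()
+//  r.Use(middleware.WithValue("app.version", "v1.0.0"))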
+func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(context.WithValue(r.Context(), key, val))
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
new file mode 100644
index 0000000000..382a523e48
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
@@ -0,0 +1,180 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
+// hook into various parts of the response process.
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
+ _, fl := w.(http.Flusher)
+
+ bw := basicWriter{ResponseWriter: w}
+
+ if protoMajor == 2 {
+ _, ps := w.(http.Pusher)
+ if fl && ps {
+ return &http2FancyWriter{bw}
+ }
+ } else {
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ if fl && hj && rf {
+ return &httpFancyWriter{bw}
+ }
+ }
+ if fl {
+ return &flushWriter{bw}
+ }
+
+ return &bw
+}
+
+// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
+// into various parts of the response process.
+type WrapResponseWriter interface {
+ http.ResponseWriter
+ // Status returns the HTTP status of the request, or 0 if one has not
+ // yet been sent.
+ Status() int
+ // BytesWritten returns the total number of bytes sent to the client.
+ BytesWritten() int
+ // Tee causes the response body to be written to the given io.Writer in
+ // addition to proxying the writes through. Only one io.Writer can be
+ // tee'd to at once: setting a second one will overwrite the first.
+ // Writes will be sent to the proxy before being written to this
+ // io.Writer. It is illegal for the tee'd writer to be modified
+ // concurrently with writes.
+ Tee(io.Writer)
+ // Unwrap returns the original proxied target.
+ Unwrap() http.ResponseWriter
+}
+
+// basicWriter wraps a http.ResponseWriter that implements the minimal
+// http.ResponseWriter interface.
+type basicWriter struct {
+ http.ResponseWriter
+ wroteHeader bool
+ code int
+ bytes int
+ tee io.Writer
+}
+
+func (b *basicWriter) WriteHeader(code int) {
+ if !b.wroteHeader {
+ b.code = code
+ b.wroteHeader = true
+ b.ResponseWriter.WriteHeader(code)
+ }
+}
+
+func (b *basicWriter) Write(buf []byte) (int, error) {
+ b.maybeWriteHeader()
+ n, err := b.ResponseWriter.Write(buf)
+ if b.tee != nil {
+ _, err2 := b.tee.Write(buf[:n])
+ // Prefer errors generated by the proxied writer.
+ if err == nil {
+ err = err2
+ }
+ }
+ b.bytes += n
+ return n, err
+}
+
+func (b *basicWriter) maybeWriteHeader() {
+ if !b.wroteHeader {
+ b.WriteHeader(http.StatusOK)
+ }
+}
+
+func (b *basicWriter) Status() int {
+ return b.code
+}
+
+func (b *basicWriter) BytesWritten() int {
+ return b.bytes
+}
+
+func (b *basicWriter) Tee(w io.Writer) {
+ b.tee = w
+}
+
+func (b *basicWriter) Unwrap() http.ResponseWriter {
+ return b.ResponseWriter
+}
+
+type flushWriter struct {
+ basicWriter
+}
+
+func (f *flushWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &flushWriter{}
+
+// httpFancyWriter is an HTTP writer that additionally satisfies
+// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the proxied object.
+type httpFancyWriter struct {
+ basicWriter
+}
+
+func (f *httpFancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
+ return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
+ if f.basicWriter.tee != nil {
+ n, err := io.Copy(&f.basicWriter, r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+ }
+ rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
+ f.basicWriter.maybeWriteHeader()
+ n, err := rf.ReadFrom(r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+}
+
+var _ http.Flusher = &httpFancyWriter{}
+var _ http.Hijacker = &httpFancyWriter{}
+var _ http.Pusher = &http2FancyWriter{}
+var _ io.ReaderFrom = &httpFancyWriter{}
+
+// http2FancyWriter is an HTTP/2 writer that additionally satisfies
+// http.Flusher and http.Pusher. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the underlying object.
+type http2FancyWriter struct {
+ basicWriter
+}
+
+func (f *http2FancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &http2FancyWriter{}
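For context, the wrapped writer defined above is normally used from a middleware so the status code and byte count can be inspected after the inner handler runs. A minimal, illustrative sketch (statusLogger is a hypothetical name, not part of the vendored package):

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/middleware"
)

// statusLogger wraps the ResponseWriter with NewWrapResponseWriter so the
// recorded status and byte count are available once the handler returns.
func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		next.ServeHTTP(ww, r)
		log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, ww.Status(), ww.BytesWritten())
	})
}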
diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go
new file mode 100644
index 0000000000..52950e97b5
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/mux.go
@@ -0,0 +1,466 @@
+package chi
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var _ Router = &Mux{}
+
+// Mux is a simple HTTP route multiplexer that parses a request path,
+// records any URL params, and executes an end handler. It implements
+// the http.Handler interface and is friendly with the standard library.
+//
+// Mux is designed to be fast, minimal and offer a powerful API for building
+// modular and composable HTTP services with a large set of handlers. It's
+// particularly useful for writing large REST API services that break a handler
+// into many smaller parts composed of middlewares and end handlers.
+type Mux struct {
+ // The radix trie router
+ tree *node
+
+ // The middleware stack
+ middlewares []func(http.Handler) http.Handler
+
+ // Controls the behaviour of middleware chain generation when a mux
+ // is registered as an inline group inside another mux.
+ inline bool
+ parent *Mux
+
+ // The computed mux handler made of the chained middleware stack and
+ // the tree router
+ handler http.Handler
+
+ // Routing context pool
+ pool *sync.Pool
+
+ // Custom route not found handler
+ notFoundHandler http.HandlerFunc
+
+ // Custom method not allowed handler
+ methodNotAllowedHandler http.HandlerFunc
+}
+
+// NewMux returns a newly initialized Mux object that implements the Router
+// interface.
+func NewMux() *Mux {
+ mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
+ mux.pool.New = func() interface{} {
+ return NewRouteContext()
+ }
+ return mux
+}
+
+// ServeHTTP is the single method of the http.Handler interface that makes
+// Mux interoperable with the standard library. It uses a sync.Pool to get and
+// reuse routing contexts for each request.
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Ensure the mux has some routes defined
+ if mx.handler == nil {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Check if a routing context already exists from a parent router.
+ rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
+ if rctx != nil {
+ mx.handler.ServeHTTP(w, r)
+ return
+ }
+
+ // Fetch a RouteContext object from the sync pool, and call the computed
+ // mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
+ // Once the request is finished, reset the routing context and put it back
+ // into the pool for reuse from another request.
+ rctx = mx.pool.Get().(*Context)
+ rctx.Reset()
+ rctx.Routes = mx
+
+ // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
+ r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
+
+ // Serve the request and once it's done, put the request context back in the sync pool
+ mx.handler.ServeHTTP(w, r)
+ mx.pool.Put(rctx)
+}
+
+// Use appends a middleware handler to the Mux middleware stack.
+//
+// The middleware stack for any Mux will execute before searching for a matching
+// route to a specific handler, which provides opportunity to respond early,
+// change the course of the request execution, or set request-scoped values for
+// the next http.Handler.
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
+ if mx.handler != nil {
+ panic("chi: all middlewares must be defined before routes on a mux")
+ }
+ mx.middlewares = append(mx.middlewares, middlewares...)
+}
+
+// Handle adds the route `pattern` that matches any http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Handle(pattern string, handler http.Handler) {
+ mx.handle(mALL, pattern, handler)
+}
+
+// HandleFunc adds the route `pattern` that matches any http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mALL, pattern, handlerFn)
+}
+
+// Method adds the route `pattern` that matches `method` http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Method(method, pattern string, handler http.Handler) {
+ m, ok := methodMap[strings.ToUpper(method)]
+ if !ok {
+ panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
+ }
+ mx.handle(m, pattern, handler)
+}
+
+// MethodFunc adds the route `pattern` that matches `method` http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
+ mx.Method(method, pattern, handlerFn)
+}
+
+// Connect adds the route `pattern` that matches a CONNECT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mCONNECT, pattern, handlerFn)
+}
+
+// Delete adds the route `pattern` that matches a DELETE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mDELETE, pattern, handlerFn)
+}
+
+// Get adds the route `pattern` that matches a GET http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mGET, pattern, handlerFn)
+}
+
+// Head adds the route `pattern` that matches a HEAD http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mHEAD, pattern, handlerFn)
+}
+
+// Options adds the route `pattern` that matches an OPTIONS http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mOPTIONS, pattern, handlerFn)
+}
+
+// Patch adds the route `pattern` that matches a PATCH http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPATCH, pattern, handlerFn)
+}
+
+// Post adds the route `pattern` that matches a POST http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPOST, pattern, handlerFn)
+}
+
+// Put adds the route `pattern` that matches a PUT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPUT, pattern, handlerFn)
+}
+
+// Trace adds the route `pattern` that matches a TRACE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mTRACE, pattern, handlerFn)
+}
+
+// NotFound sets a custom http.HandlerFunc for routing paths that could
+// not be found. The default 404 handler is `http.NotFound`.
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
+ // Build NotFound handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the notFoundHandler from this point forward
+ m.notFoundHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.notFoundHandler == nil {
+ subMux.NotFound(hFn)
+ }
+ })
+}
+
+// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
+// method is unresolved. The default handler returns a 405 with an empty body.
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
+ // Build MethodNotAllowed handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the methodNotAllowedHandler from this point forward
+ m.methodNotAllowedHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.methodNotAllowedHandler == nil {
+ subMux.MethodNotAllowed(hFn)
+ }
+ })
+}
+
+// With adds inline middlewares for an endpoint handler.
+func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
+ // As in handle(), we must build the mux handler once no further middleware
+ // registration is allowed for this stack, which is the case now.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Copy middlewares from parent inline muxes
+ var mws Middlewares
+ if mx.inline {
+ mws = make(Middlewares, len(mx.middlewares))
+ copy(mws, mx.middlewares)
+ }
+ mws = append(mws, middlewares...)
+
+ im := &Mux{
+ pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
+ notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
+ }
+
+ return im
+}
+
+// Group creates a new inline-Mux with a fresh middleware stack. It's useful
+// for a group of handlers along the same routing path that use an additional
+// set of middlewares. See _examples/.
+func (mx *Mux) Group(fn func(r Router)) Router {
+ im := mx.With().(*Mux)
+ if fn != nil {
+ fn(im)
+ }
+ return im
+}
+
+// Route creates a new Mux with a fresh middleware stack and mounts it
+// along the `pattern` as a subrouter. Effectively, this is a short-hand
+// call to Mount. See _examples/.
+func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
+ subRouter := NewRouter()
+ if fn != nil {
+ fn(subRouter)
+ }
+ mx.Mount(pattern, subRouter)
+ return subRouter
+}
+
+// Mount attaches another http.Handler or chi Router as a subrouter along a routing
+// path. It's very useful for splitting up a large API into many independent routers
+// and composing them into a single service using Mount. See _examples/.
+//
+// Note that Mount() simply sets a wildcard along the `pattern` that will continue
+// routing at the `handler`, which in most cases is another chi.Router. As a result,
+// if you define two Mount() routes on the exact same pattern the mount will panic.
+func (mx *Mux) Mount(pattern string, handler http.Handler) {
+ // Provide runtime safety for ensuring a pattern isn't mounted on an existing
+ // routing pattern.
+ if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
+ panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
+ }
+
+ // Assign the sub-Router the parent's not-found and method-not-allowed handlers if not specified.
+ subr, ok := handler.(*Mux)
+ if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
+ subr.NotFound(mx.notFoundHandler)
+ }
+ if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
+ subr.MethodNotAllowed(mx.methodNotAllowedHandler)
+ }
+
+ mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rctx := RouteContext(r.Context())
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ handler.ServeHTTP(w, r)
+ })
+
+ if pattern == "" || pattern[len(pattern)-1] != '/' {
+ mx.handle(mALL|mSTUB, pattern, mountHandler)
+ mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
+ pattern += "/"
+ }
+
+ method := mALL
+ subroutes, _ := handler.(Routes)
+ if subroutes != nil {
+ method |= mSTUB
+ }
+ n := mx.handle(method, pattern+"*", mountHandler)
+
+ if subroutes != nil {
+ n.subroutes = subroutes
+ }
+}
+
+// Routes returns a slice of routing information from the tree,
+// useful for traversing available routes of a router.
+func (mx *Mux) Routes() []Route {
+ return mx.tree.routes()
+}
+
+// Middlewares returns a slice of middleware handler functions.
+func (mx *Mux) Middlewares() Middlewares {
+ return mx.middlewares
+}
+
+// Match searches the routing tree for a handler that matches the method/path.
+// It's similar to routing a http request, but without executing the handler
+// thereafter.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or make a NewRouteContext().
+func (mx *Mux) Match(rctx *Context, method, path string) bool {
+ m, ok := methodMap[method]
+ if !ok {
+ return false
+ }
+
+ node, _, h := mx.tree.FindRoute(rctx, m, path)
+
+ if node != nil && node.subroutes != nil {
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ return node.subroutes.Match(rctx, method, rctx.RoutePath)
+ }
+
+ return h != nil
+}
+
+// NotFoundHandler returns the default Mux 404 responder whenever a route
+// cannot be found.
+func (mx *Mux) NotFoundHandler() http.HandlerFunc {
+ if mx.notFoundHandler != nil {
+ return mx.notFoundHandler
+ }
+ return http.NotFound
+}
+
+// MethodNotAllowedHandler returns the default Mux 405 responder whenever
+// a method cannot be resolved for a route.
+func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
+ if mx.methodNotAllowedHandler != nil {
+ return mx.methodNotAllowedHandler
+ }
+ return methodNotAllowedHandler
+}
+
+// buildRouteHandler builds the single mux handler that is a chain of the middleware
+// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
+// point, no other middlewares can be registered on this Mux's stack. But you can still
+// compose additional middlewares via Group()'s or using a chained middleware handler.
+func (mx *Mux) buildRouteHandler() {
+ mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
+}
+
+// handle registers a http.Handler in the routing tree for a particular http method
+// and routing pattern.
+func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
+ if len(pattern) == 0 || pattern[0] != '/' {
+ panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
+ }
+
+ // Build the computed routing handler for this routing pattern.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Build endpoint handler with inline middlewares for the route
+ var h http.Handler
+ if mx.inline {
+ mx.handler = http.HandlerFunc(mx.routeHTTP)
+ h = Chain(mx.middlewares...).Handler(handler)
+ } else {
+ h = handler
+ }
+
+ // Add the endpoint to the tree and return the node
+ return mx.tree.InsertRoute(method, pattern, h)
+}
+
+// routeHTTP routes a http.Request through the Mux routing tree to serve
+// the matching handler for a particular http method.
+func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Grab the route context object
+ rctx := r.Context().Value(RouteCtxKey).(*Context)
+
+ // The request routing path
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Check if method is supported by chi
+ if rctx.RouteMethod == "" {
+ rctx.RouteMethod = r.Method
+ }
+ method, ok := methodMap[rctx.RouteMethod]
+ if !ok {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Find the route
+ if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
+ h.ServeHTTP(w, r)
+ return
+ }
+ if rctx.methodNotAllowed {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ } else {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ }
+}
+
+func (mx *Mux) nextRoutePath(rctx *Context) string {
+ routePath := "/"
+ nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
+ if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
+ routePath = "/" + rctx.routeParams.Values[nx]
+ }
+ return routePath
+}
+
+// Recursively update data on child routers.
+func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
+ for _, r := range mx.tree.routes() {
+ subMux, ok := r.SubRoutes.(*Mux)
+ if !ok {
+ continue
+ }
+ fn(subMux)
+ }
+}
+
+// methodNotAllowedHandler is a helper function to respond with a 405,
+// method not allowed.
+func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(405)
+ w.Write(nil)
+}
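The Mux above is the package's http.Handler entry point; a short sketch of how its API composes (illustrative only, using NewMux and the Use/Get/Route methods defined in this file):

package main

import (
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	mx := chi.NewMux()

	// Middlewares must be registered before any route; Use panics otherwise.
	mx.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("X-Example", "1") // hypothetical header, purely for illustration
			next.ServeHTTP(w, r)
		})
	})

	mx.Get("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong"))
	})

	// Route mounts a fresh sub-router under /admin.
	mx.Route("/admin", func(r chi.Router) {
		r.Get("/status", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		})
	})

	http.ListenAndServe(":8080", mx)
}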
diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go
new file mode 100644
index 0000000000..59b5b5f7b0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/tree.go
@@ -0,0 +1,865 @@
+package chi
+
+// The radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type methodTyp int
+
+const (
+ mSTUB methodTyp = 1 << iota
+ mCONNECT
+ mDELETE
+ mGET
+ mHEAD
+ mOPTIONS
+ mPATCH
+ mPOST
+ mPUT
+ mTRACE
+)
+
+var mALL = mCONNECT | mDELETE | mGET | mHEAD |
+ mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
+
+var methodMap = map[string]methodTyp{
+ http.MethodConnect: mCONNECT,
+ http.MethodDelete: mDELETE,
+ http.MethodGet: mGET,
+ http.MethodHead: mHEAD,
+ http.MethodOptions: mOPTIONS,
+ http.MethodPatch: mPATCH,
+ http.MethodPost: mPOST,
+ http.MethodPut: mPUT,
+ http.MethodTrace: mTRACE,
+}
+
+// RegisterMethod adds support for custom HTTP method handlers, available
+// via Router#Method and Router#MethodFunc
+func RegisterMethod(method string) {
+ if method == "" {
+ return
+ }
+ method = strings.ToUpper(method)
+ if _, ok := methodMap[method]; ok {
+ return
+ }
+ n := len(methodMap)
+ if n > strconv.IntSize {
+ panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
+ }
+ mt := methodTyp(math.Exp2(float64(n)))
+ methodMap[method] = mt
+ mALL |= mt
+}
+
+type nodeTyp uint8
+
+const (
+ ntStatic nodeTyp = iota // /home
+ ntRegexp // /{id:[0-9]+}
+ ntParam // /{user}
+ ntCatchAll // /api/v1/*
+)
+
+type node struct {
+ // node type: static, regexp, param, catchAll
+ typ nodeTyp
+
+ // first byte of the prefix
+ label byte
+
+ // first byte of the child prefix
+ tail byte
+
+ // prefix is the common prefix we ignore
+ prefix string
+
+ // regexp matcher for regexp nodes
+ rex *regexp.Regexp
+
+ // HTTP handler endpoints on the leaf node
+ endpoints endpoints
+
+ // subroutes on the leaf node
+ subroutes Routes
+
+ // child nodes should be stored in-order for iteration,
+ // in groups of the node type.
+ children [ntCatchAll + 1]nodes
+}
+
+// endpoints is a mapping of http method constants to handlers
+// for a given route.
+type endpoints map[methodTyp]*endpoint
+
+type endpoint struct {
+ // endpoint handler
+ handler http.Handler
+
+ // pattern is the routing pattern for handler nodes
+ pattern string
+
+ // parameter keys recorded on handler nodes
+ paramKeys []string
+}
+
+func (s endpoints) Value(method methodTyp) *endpoint {
+ mh, ok := s[method]
+ if !ok {
+ mh = &endpoint{}
+ s[method] = mh
+ }
+ return mh
+}
+
+func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
+ var parent *node
+ search := pattern
+
+ for {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ // Insert or update the node's leaf handler
+ n.setEndpoint(method, handler, pattern)
+ return n
+ }
+
+ // We're going to be searching for a wild node next,
+ // in this case, we need to get the tail
+ var label = search[0]
+ var segTail byte
+ var segEndIdx int
+ var segTyp nodeTyp
+ var segRexpat string
+ if label == '{' || label == '*' {
+ segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
+ }
+
+ var prefix string
+ if segTyp == ntRegexp {
+ prefix = segRexpat
+ }
+
+ // Look for the edge to attach to
+ parent = n
+ n = n.getEdge(segTyp, label, segTail, prefix)
+
+ // No edge, create one
+ if n == nil {
+ child := &node{label: label, tail: segTail, prefix: search}
+ hn := parent.addChild(child, search)
+ hn.setEndpoint(method, handler, pattern)
+
+ return hn
+ }
+
+ // Found an edge to match the pattern
+
+ if n.typ > ntStatic {
+ // We found a param node, trim the param from the search path and continue.
+ // This param/wild pattern segment would already be on the tree from a previous
+ // call to addChild when creating a new node.
+ search = search[segEndIdx:]
+ continue
+ }
+
+ // Static nodes fall below here.
+ // Determine longest prefix of the search key on match.
+ commonPrefix := longestPrefix(search, n.prefix)
+ if commonPrefix == len(n.prefix) {
+ // the common prefix is as long as the current node's prefix we're attempting to insert.
+ // keep the search going.
+ search = search[commonPrefix:]
+ continue
+ }
+
+ // Split the node
+ child := &node{
+ typ: ntStatic,
+ prefix: search[:commonPrefix],
+ }
+ parent.replaceChild(search[0], segTail, child)
+
+ // Restore the existing node
+ n.label = n.prefix[commonPrefix]
+ n.prefix = n.prefix[commonPrefix:]
+ child.addChild(n, n.prefix)
+
+ // If the new key is a subset, set the method/handler on this node and finish.
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ child.setEndpoint(method, handler, pattern)
+ return child
+ }
+
+ // Create a new edge for the node
+ subchild := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn := child.addChild(subchild, search)
+ hn.setEndpoint(method, handler, pattern)
+ return hn
+ }
+}
+
+// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
+// For a URL router like chi's, we split the static, param, regexp and wildcard segments
+// into different nodes. In addition, addChild will recursively call itself until every
+// pattern segment is added to the url pattern tree as individual nodes, depending on type.
+func (n *node) addChild(child *node, prefix string) *node {
+ search := prefix
+
+ // handler leaf node added to the tree is the child.
+ // this may be overridden later down the flow
+ hn := child
+
+ // Parse next segment
+ segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
+
+ // Add child depending on next up segment
+ switch segTyp {
+
+ case ntStatic:
+ // Search prefix is all static (that is, has no params in path)
+ // noop
+
+ default:
+ // Search prefix contains a param, regexp or wildcard
+
+ if segTyp == ntRegexp {
+ rex, err := regexp.Compile(segRexpat)
+ if err != nil {
+ panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
+ }
+ child.prefix = segRexpat
+ child.rex = rex
+ }
+
+ if segStartIdx == 0 {
+ // Route starts with a param
+ child.typ = segTyp
+
+ if segTyp == ntCatchAll {
+ segStartIdx = -1
+ } else {
+ segStartIdx = segEndIdx
+ }
+ if segStartIdx < 0 {
+ segStartIdx = len(search)
+ }
+ child.tail = segTail // for params, we set the tail
+
+ if segStartIdx != len(search) {
+ // add static edge for the remaining part, split the end.
+ // it's not possible to have adjacent param nodes, so it's certainly
+ // going to be a static node next.
+
+ search = search[segStartIdx:] // advance search position
+
+ nn := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn = child.addChild(nn, search)
+ }
+
+ } else if segStartIdx > 0 {
+ // Route has some param
+
+ // starts with a static segment
+ child.typ = ntStatic
+ child.prefix = search[:segStartIdx]
+ child.rex = nil
+
+ // add the param edge node
+ search = search[segStartIdx:]
+
+ nn := &node{
+ typ: segTyp,
+ label: search[0],
+ tail: segTail,
+ }
+ hn = child.addChild(nn, search)
+
+ }
+ }
+
+ n.children[child.typ] = append(n.children[child.typ], child)
+ n.children[child.typ].Sort()
+ return hn
+}
+
+func (n *node) replaceChild(label, tail byte, child *node) {
+ for i := 0; i < len(n.children[child.typ]); i++ {
+ if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
+ n.children[child.typ][i] = child
+ n.children[child.typ][i].label = label
+ n.children[child.typ][i].tail = tail
+ return
+ }
+ }
+ panic("chi: replacing missing child")
+}
+
+func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
+ nds := n.children[ntyp]
+ for i := 0; i < len(nds); i++ {
+ if nds[i].label == label && nds[i].tail == tail {
+ if ntyp == ntRegexp && nds[i].prefix != prefix {
+ continue
+ }
+ return nds[i]
+ }
+ }
+ return nil
+}
+
+func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
+ // Set the handler for the method type on the node
+ if n.endpoints == nil {
+ n.endpoints = make(endpoints)
+ }
+
+ paramKeys := patParamKeys(pattern)
+
+ if method&mSTUB == mSTUB {
+ n.endpoints.Value(mSTUB).handler = handler
+ }
+ if method&mALL == mALL {
+ h := n.endpoints.Value(mALL)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ for _, m := range methodMap {
+ h := n.endpoints.Value(m)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+ } else {
+ h := n.endpoints.Value(method)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+}
+
+func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
+ // Reset the context routing pattern and params
+ rctx.routePattern = ""
+ rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
+ rctx.routeParams.Values = rctx.routeParams.Values[:0]
+
+ // Find the routing handlers for the path
+ rn := n.findRoute(rctx, method, path)
+ if rn == nil {
+ return nil, nil, nil
+ }
+
+ // Record the routing params in the request lifecycle
+ rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
+ rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
+
+ // Record the routing pattern in the request lifecycle
+ if rn.endpoints[method].pattern != "" {
+ rctx.routePattern = rn.endpoints[method].pattern
+ rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
+ }
+
+ return rn, rn.endpoints, rn.endpoints[method].handler
+}
+
+// Recursive edge traversal by checking all nodeTyp groups along the way.
+// It's like searching through a multi-dimensional radix trie.
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
+ nn := n
+ search := path
+
+ for t, nds := range nn.children {
+ ntyp := nodeTyp(t)
+ if len(nds) == 0 {
+ continue
+ }
+
+ var xn *node
+ xsearch := search
+
+ var label byte
+ if search != "" {
+ label = search[0]
+ }
+
+ switch ntyp {
+ case ntStatic:
+ xn = nds.findEdge(label)
+ if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
+ continue
+ }
+ xsearch = xsearch[len(xn.prefix):]
+
+ case ntParam, ntRegexp:
+ // short-circuit and return no matching route for empty param values
+ if xsearch == "" {
+ continue
+ }
+
+ // serially loop through each node grouped by the tail delimiter
+ for idx := 0; idx < len(nds); idx++ {
+ xn = nds[idx]
+
+ // label for param nodes is the delimiter byte
+ p := strings.IndexByte(xsearch, xn.tail)
+
+ if p < 0 {
+ if xn.tail == '/' {
+ p = len(xsearch)
+ } else {
+ continue
+ }
+ }
+
+ if ntyp == ntRegexp && xn.rex != nil {
+ if !xn.rex.Match([]byte(xsearch[:p])) {
+ continue
+ }
+ } else if strings.IndexByte(xsearch[:p], '/') != -1 {
+ // avoid a match across path segments
+ continue
+ }
+
+ prevlen := len(rctx.routeParams.Values)
+ rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
+ xsearch = xsearch[p:]
+
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node on this branch
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // not found on this branch, reset vars
+ rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
+ xsearch = search
+ }
+
+ rctx.routeParams.Values = append(rctx.routeParams.Values, "")
+
+ default:
+ // catch-all nodes
+ rctx.routeParams.Values = append(rctx.routeParams.Values, search)
+ xn = nds[0]
+ xsearch = ""
+ }
+
+ if xn == nil {
+ continue
+ }
+
+ // did we find it yet?
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node..
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // Did not find final handler, let's remove the param here if it was set
+ if xn.typ > ntStatic {
+ if len(rctx.routeParams.Values) > 0 {
+ rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
+ nds := n.children[ntyp]
+ num := len(nds)
+ idx := 0
+
+ switch ntyp {
+ case ntStatic, ntParam, ntRegexp:
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > nds[idx].label {
+ i = idx + 1
+ } else if label < nds[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if nds[idx].label != label {
+ return nil
+ }
+ return nds[idx]
+
+ default: // catch all
+ return nds[idx]
+ }
+}
+
+func (n *node) isLeaf() bool {
+ return n.endpoints != nil
+}
+
+func (n *node) findPattern(pattern string) bool {
+ nn := n
+ for _, nds := range nn.children {
+ if len(nds) == 0 {
+ continue
+ }
+
+ n = nn.findEdge(nds[0].typ, pattern[0])
+ if n == nil {
+ continue
+ }
+
+ var idx int
+ var xpattern string
+
+ switch n.typ {
+ case ntStatic:
+ idx = longestPrefix(pattern, n.prefix)
+ if idx < len(n.prefix) {
+ continue
+ }
+
+ case ntParam, ntRegexp:
+ idx = strings.IndexByte(pattern, '}') + 1
+
+ case ntCatchAll:
+ idx = longestPrefix(pattern, "*")
+
+ default:
+ panic("chi: unknown node type")
+ }
+
+ xpattern = pattern[idx:]
+ if len(xpattern) == 0 {
+ return true
+ }
+
+ return n.findPattern(xpattern)
+ }
+ return false
+}
+
+func (n *node) routes() []Route {
+ rts := []Route{}
+
+ n.walk(func(eps endpoints, subroutes Routes) bool {
+ if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
+ return false
+ }
+
+ // Group methodHandlers by unique patterns
+ pats := make(map[string]endpoints)
+
+ for mt, h := range eps {
+ if h.pattern == "" {
+ continue
+ }
+ p, ok := pats[h.pattern]
+ if !ok {
+ p = endpoints{}
+ pats[h.pattern] = p
+ }
+ p[mt] = h
+ }
+
+ for p, mh := range pats {
+ hs := make(map[string]http.Handler)
+ if mh[mALL] != nil && mh[mALL].handler != nil {
+ hs["*"] = mh[mALL].handler
+ }
+
+ for mt, h := range mh {
+ if h.handler == nil {
+ continue
+ }
+ m := methodTypString(mt)
+ if m == "" {
+ continue
+ }
+ hs[m] = h.handler
+ }
+
+ rt := Route{p, hs, subroutes}
+ rts = append(rts, rt)
+ }
+
+ return false
+ })
+
+ return rts
+}
+
+func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
+ // Visit the leaf values if any
+ if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, ns := range n.children {
+ for _, cn := range ns {
+ if cn.walk(fn) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// patNextSegment returns the next segment details from a pattern:
+// node type, param key, regexp string, param tail byte, param starting index, param ending index
+func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
+ ps := strings.Index(pattern, "{")
+ ws := strings.Index(pattern, "*")
+
+ if ps < 0 && ws < 0 {
+ return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
+ }
+
+ // Sanity check
+ if ps >= 0 && ws >= 0 && ws < ps {
+ panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
+ }
+
+ var tail byte = '/' // Default endpoint tail to / byte
+
+ if ps >= 0 {
+ // Param/Regexp pattern is next
+ nt := ntParam
+
+ // Read to the closing '}', taking into account opens and closes in the curly-brace count (cc)
+ cc := 0
+ pe := ps
+ for i, c := range pattern[ps:] {
+ if c == '{' {
+ cc++
+ } else if c == '}' {
+ cc--
+ if cc == 0 {
+ pe = ps + i
+ break
+ }
+ }
+ }
+ if pe == ps {
+ panic("chi: route param closing delimiter '}' is missing")
+ }
+
+ key := pattern[ps+1 : pe]
+ pe++ // set end to next position
+
+ if pe < len(pattern) {
+ tail = pattern[pe]
+ }
+
+ var rexpat string
+ if idx := strings.Index(key, ":"); idx >= 0 {
+ nt = ntRegexp
+ rexpat = key[idx+1:]
+ key = key[:idx]
+ }
+
+ if len(rexpat) > 0 {
+ if rexpat[0] != '^' {
+ rexpat = "^" + rexpat
+ }
+ if rexpat[len(rexpat)-1] != '$' {
+ rexpat += "$"
+ }
+ }
+
+ return nt, key, rexpat, tail, ps, pe
+ }
+
+ // Wildcard pattern as finale
+ if ws < len(pattern)-1 {
+ panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
+ }
+ return ntCatchAll, "*", "", 0, ws, len(pattern)
+}
+
+func patParamKeys(pattern string) []string {
+ pat := pattern
+ paramKeys := []string{}
+ for {
+ ptyp, paramKey, _, _, _, e := patNextSegment(pat)
+ if ptyp == ntStatic {
+ return paramKeys
+ }
+ for i := 0; i < len(paramKeys); i++ {
+ if paramKeys[i] == paramKey {
+ panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
+ }
+ }
+ paramKeys = append(paramKeys, paramKey)
+ pat = pat[e:]
+ }
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+func methodTypString(method methodTyp) string {
+ for s, t := range methodMap {
+ if method == t {
+ return s
+ }
+ }
+ return ""
+}
+
+type nodes []*node
+
+// Sort the list of nodes by label
+func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
+func (ns nodes) Len() int { return len(ns) }
+func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
+
+// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
+// The list order determines the traversal order.
+func (ns nodes) tailSort() {
+ for i := len(ns) - 1; i >= 0; i-- {
+ if ns[i].typ > ntStatic && ns[i].tail == '/' {
+ ns.Swap(i, len(ns)-1)
+ return
+ }
+ }
+}
+
+func (ns nodes) findEdge(label byte) *node {
+ num := len(ns)
+ idx := 0
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > ns[idx].label {
+ i = idx + 1
+ } else if label < ns[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if ns[idx].label != label {
+ return nil
+ }
+ return ns[idx]
+}
+
+// Route describes the details of a routing handler.
+// Handlers map key is an HTTP method
+type Route struct {
+ Pattern string
+ Handlers map[string]http.Handler
+ SubRoutes Routes
+}
+
+// WalkFunc is the type of the function called for each method and route visited by Walk.
+type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
+
+// Walk walks any router tree that implements Routes interface.
+func Walk(r Routes, walkFn WalkFunc) error {
+ return walk(r, walkFn, "")
+}
+
+func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
+ for _, route := range r.Routes() {
+ mws := make([]func(http.Handler) http.Handler, len(parentMw))
+ copy(mws, parentMw)
+ mws = append(mws, r.Middlewares()...)
+
+ if route.SubRoutes != nil {
+ if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
+ return err
+ }
+ continue
+ }
+
+ for method, handler := range route.Handlers {
+ if method == "*" {
+ // Ignore a "catchAll" method, since we pass down all the specific methods for each route.
+ continue
+ }
+
+ fullRoute := parentRoute + route.Pattern
+ fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
+
+ if chain, ok := handler.(*ChainHandler); ok {
+ if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
+ return err
+ }
+ } else {
+ if err := walkFn(method, fullRoute, handler, mws...); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
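The tree above recognizes four node types per path segment: static, {param}, {name:regexp}, and a trailing * wildcard. A hedged usage sketch (handler names are made up; URLParam is the package's param accessor defined elsewhere in the vendored code):

package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

// getArticle reads the {id} value that findRoute recorded into the routing context.
func getArticle(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "article %s", chi.URLParam(r, "id"))
}

func main() {
	r := chi.NewMux()
	r.Get("/articles", func(w http.ResponseWriter, r *http.Request) {}) // static node
	r.Get("/articles/{id}", getArticle)                                 // param node
	r.Get("/articles/{id:[0-9]+}/raw", getArticle)                      // regexp node
	r.Get("/files/*", func(w http.ResponseWriter, r *http.Request) {})  // catch-all node
	http.ListenAndServe(":8080", r)
}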
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
index 94ff801df1..0cffafa7bf 100644
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -6,7 +6,6 @@ linters:
disable-all: true
enable:
- asciicheck
- - deadcode
- errcheck
- forcetypeassert
- gocritic
@@ -18,10 +17,8 @@ linters:
- misspell
- revive
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
issues:
exclude-use-default: false
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
index 9d92a38f1d..99fe8be93c 100644
--- a/vendor/github.com/go-logr/logr/discard.go
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -20,35 +20,5 @@ package logr
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
- return Logger{
- level: 0,
- sink: discardLogSink{},
- }
-}
-
-// discardLogSink is a LogSink that discards all messages.
-type discardLogSink struct{}
-
-// Verify that it actually implements the interface
-var _ LogSink = discardLogSink{}
-
-func (l discardLogSink) Init(RuntimeInfo) {
-}
-
-func (l discardLogSink) Enabled(int) bool {
- return false
-}
-
-func (l discardLogSink) Info(int, string, ...interface{}) {
-}
-
-func (l discardLogSink) Error(error, string, ...interface{}) {
-}
-
-func (l discardLogSink) WithValues(...interface{}) LogSink {
- return l
-}
-
-func (l discardLogSink) WithName(string) LogSink {
- return l
+ return New(nil)
}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index 7accdb0c40..e52f0cd01e 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -21,13 +21,13 @@ limitations under the License.
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
-// Custom LogSinks
+// # Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
-// Formatting
+// # Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
@@ -37,6 +37,7 @@ package funcr
import (
"bytes"
"encoding"
+ "encoding/json"
"fmt"
"path/filepath"
"reflect"
@@ -217,7 +218,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
prefix: "",
values: nil,
depth: 0,
- opts: opts,
+ opts: &opts,
}
return f
}
@@ -231,7 +232,7 @@ type Formatter struct {
values []interface{}
valuesStr string
depth int
- opts Options
+ opts *Options
}
// outputFormat indicates which outputFormat to use.
@@ -447,6 +448,7 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
@@ -478,9 +480,10 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
if omitempty && isEmpty(v.Field(i)) {
continue
}
- if i > 0 {
+ if printComma {
buf.WriteByte(',')
}
+ printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
@@ -500,6 +503,20 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
}
return buf.String()
case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a json.RawMessage.
+ // If so just emit "as-is" and don't pretty it as that will just print
+ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
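The json.RawMessage special case above changes what a JSON-mode logger emits for raw payloads. A small sketch of how it is observed, assuming only the public funcr.NewJSON constructor:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	logger := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})

	// With the change above, the raw JSON document is emitted verbatim as the
	// "body" value rather than being rendered as an array of byte values.
	raw := json.RawMessage(`{"nested":"doc"}`)
	logger.Info("payload", "body", raw)
}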
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index c3b56b3d2c..e027aea3fd 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -21,7 +21,7 @@ limitations under the License.
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
-// Usage
+// # Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
@@ -30,16 +30,20 @@ limitations under the License.
// "structured logging".
//
// With Go's standard log package, we might write:
-// log.Printf("setting target value %s", targetValue)
+//
+// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
-// logger.Info("setting target", "value", targetValue)
+//
+// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
-// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
@@ -47,7 +51,7 @@ limitations under the License.
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
-// Verbosity
+// # Verbosity
//
// Often we want to log information only when the application in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
@@ -58,20 +62,22 @@ limitations under the License.
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
//
// We can write:
-// logger.V(2).Info("an unusual thing happened")
//
-// Logger Names
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
-// logger.WithName("compactor").Info("started", "time", time.Now())
+// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
@@ -82,25 +88,27 @@ limitations under the License.
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
-// Saved Values
+// # Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
//
-// Best Practices
+// # Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
@@ -124,15 +132,15 @@ limitations under the License.
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
-// Key Naming Conventions
+// # Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
-// * be human-readable and meaningful (not auto-generated or simple ordinals)
-// * be constant (not dependent on input data)
-// * contain only printable characters
-// * not contain whitespace or punctuation
-// * use lower case for simple keys and lowerCamelCase for more complex ones
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
@@ -141,51 +149,54 @@ limitations under the License.
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
-// * "caller": the calling information (file/line) of a particular log line
-// * "error": the underlying error value in the `Error` method
-// * "level": the log level
-// * "logger": the name of the associated logger
-// * "msg": the log message
-// * "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// * "ts": the timestamp for a log line
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
-// Break Glass
+// # Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying()
-// }
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
//
// Logger grants access to the sink to enable type assertions like this:
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink()(impl.Underlier) {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
@@ -201,11 +212,14 @@ import (
)
// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users.
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
- sink.Init(runtimeInfo)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
return logger
}
@@ -244,7 +258,7 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
- return l.sink.Enabled(l.level)
+ return l.sink != nil && l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
@@ -254,6 +268,9 @@ func (l Logger) Enabled() bool {
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
@@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+ if l.sink == nil {
+ return
+ }
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
if level < 0 {
level = 0
}
@@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger {
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
@@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
l.setSink(l.sink.WithName(name))
return l
}
@@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger {
// WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
@@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
@@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
return helper, l
}
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
@@ -442,7 +482,7 @@ type LogSink interface {
WithName(name string) LogSink
}
-// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
@@ -467,7 +507,7 @@ type CallDepthLogSink interface {
WithCallDepth(depth int) LogSink
}
-// CallStackHelperLogSink represents a Logger that knows how to climb
+// CallStackHelperLogSink represents a LogSink that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
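The nil-sink guards and IsZero added above make the zero-value Logger safe to call. A brief sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr"
)

func main() {
	// A zero-value Logger has a nil sink: every call is now a silent no-op.
	var logger logr.Logger
	logger.Info("dropped silently")
	logger.Error(errors.New("boom"), "also dropped")
	fmt.Println(logger.IsZero()) // true

	// Discard() is now simply New(nil), so it behaves the same way.
	fmt.Println(logr.Discard().IsZero()) // true
}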
diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go
index d7dc564e9a..4c00ed3a5a 100644
--- a/vendor/github.com/go-openapi/runtime/client/request.go
+++ b/vendor/github.com/go-openapi/runtime/client/request.go
@@ -161,15 +161,22 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run
}()
for fn, f := range r.fileFields {
for _, fi := range f {
- // Need to read the data so that we can detect the content type
- buf := make([]byte, 512)
- size, err := fi.Read(buf)
- if err != nil {
- logClose(err, pw)
- return
+ var fileContentType string
+ if p, ok := fi.(interface {
+ ContentType() string
+ }); ok {
+ fileContentType = p.ContentType()
+ } else {
+ // Need to read the data so that we can detect the content type
+ buf := make([]byte, 512)
+ size, err := fi.Read(buf)
+ if err != nil {
+ logClose(err, pw)
+ return
+ }
+ fileContentType = http.DetectContentType(buf)
+ fi = runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi))
}
- fileContentType := http.DetectContentType(buf)
- newFi := runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi))
// Create the MIME headers for the new part
h := make(textproto.MIMEHeader)
@@ -183,7 +190,7 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run
logClose(err, pw)
return
}
- if _, err := io.Copy(wrtr, newFi); err != nil {
+ if _, err := io.Copy(wrtr, fi); err != nil {
logClose(err, pw)
}
}
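The optional interface checked above is just { ContentType() string }. A hypothetical file wrapper (names are illustrative, not part of go-openapi) that advertises its media type, and therefore skips the 512-byte sniffing path, might look like:

package upload

import "os"

// contentTypedFile keeps the usual Name/Read/Close behaviour of *os.File and
// additionally reports a declared media type, which buildHTTP now honours.
type contentTypedFile struct {
	*os.File
	mediaType string
}

func (f contentTypedFile) ContentType() string { return f.mediaType }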
diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go
index c458b49b21..582f0fd4c4 100644
--- a/vendor/github.com/go-openapi/spec/info.go
+++ b/vendor/github.com/go-openapi/spec/info.go
@@ -16,6 +16,7 @@ package spec
import (
"encoding/json"
+ "strconv"
"strings"
"github.com/go-openapi/jsonpointer"
@@ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) {
return "", false
}
+// GetInt gets a int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+ realKey := strings.ToLower(key)
+
+ if v, ok := e.GetString(realKey); ok {
+ if r, err := strconv.Atoi(v); err == nil {
+ return r, true
+ }
+ }
+
+ if v, ok := e[realKey]; ok {
+ if r, rOk := v.(float64); rOk {
+ return int(r), true
+ }
+ }
+ return -1, false
+}
+
// GetBool gets a string value from the extensions
func (e Extensions) GetBool(key string) (bool, bool) {
if v, ok := e[strings.ToLower(key)]; ok {
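
The new Extensions.GetInt accepts either form a vendor extension can take after JSON decoding: a numeric string (parsed with strconv.Atoi) or a float64 (converted to int). The properties.go hunk below relies on it so that x-order values sort numerically, e.g. 10 after 2 rather than before it. A small illustrative sketch:

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ext := spec.Extensions{}

	ext.Add("x-order", "3") // string form, parsed via strconv.Atoi
	if v, ok := ext.GetInt("x-order"); ok {
		fmt.Println(v) // 3
	}

	ext.Add("x-order", float64(7)) // JSON numbers decode as float64
	if v, ok := ext.GetInt("x-order"); ok {
		fmt.Println(v) // 7
	}
}
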
diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go
index 2af13787ab..91d2435f01 100644
--- a/vendor/github.com/go-openapi/spec/properties.go
+++ b/vendor/github.com/go-openapi/spec/properties.go
@@ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) {
func (items OrderSchemaItems) Len() int { return len(items) }
func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] }
func (items OrderSchemaItems) Less(i, j int) (ret bool) {
- ii, oki := items[i].Extensions.GetString("x-order")
- ij, okj := items[j].Extensions.GetString("x-order")
+ ii, oki := items[i].Extensions.GetInt("x-order")
+ ij, okj := items[j].Extensions.GetInt("x-order")
if oki {
if okj {
defer func() {
@@ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) {
ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String()
}
}()
- return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int()
+ return ii < ij
}
return true
} else if okj {
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
index 4efb6f868b..16c3076fe8 100644
--- a/vendor/github.com/go-openapi/spec/responses.go
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -19,6 +19,7 @@ import (
"fmt"
"reflect"
"strconv"
+ "strings"
"github.com/go-openapi/swag"
)
@@ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
return err
}
+
if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
return err
}
@@ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals responses from JSON
func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
- var res map[string]Response
+ var res map[string]json.RawMessage
if err := json.Unmarshal(data, &res); err != nil {
- return nil
+ return err
}
+
if v, ok := res["default"]; ok {
- r.Default = &v
+ var defaultRes Response
+ if err := json.Unmarshal(v, &defaultRes); err != nil {
+ return err
+ }
+ r.Default = &defaultRes
delete(res, "default")
}
for k, v := range res {
- if nk, err := strconv.Atoi(k); err == nil {
- if r.StatusCodeResponses == nil {
- r.StatusCodeResponses = map[int]Response{}
+ if !strings.HasPrefix(k, "x-") {
+ var statusCodeResp Response
+ if err := json.Unmarshal(v, &statusCodeResp); err != nil {
+ return err
+ }
+ if nk, err := strconv.Atoi(k); err == nil {
+ if r.StatusCodeResponses == nil {
+ r.StatusCodeResponses = map[int]Response{}
+ }
+ r.StatusCodeResponses[nk] = statusCodeResp
}
- r.StatusCodeResponses[nk] = v
}
}
return nil
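
The ResponsesProps.UnmarshalJSON rewrite stops swallowing decode errors, parses each status-code entry individually, and skips x- vendor-extension keys so they no longer abort decoding (they are still collected through VendorExtensible, as the hunk above shows). An illustrative sketch with a hypothetical payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A default response, one status-code response, and a vendor extension
	// whose value is not a Response object.
	raw := []byte(`{
		"default": {"description": "unexpected error"},
		"200": {"description": "OK"},
		"x-notes": "left to VendorExtensible, ignored by ResponsesProps"
	}`)

	var r spec.Responses
	if err := json.Unmarshal(raw, &r); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(r.Default.Description)                  // unexpected error
	fmt.Println(r.StatusCodeResponses[200].Description) // OK
}
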
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
index 84a4b80118..f5a9b75bb3 100644
--- a/vendor/github.com/go-playground/validator/v10/README.md
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -1,7 +1,7 @@
Package validator
=================
[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.12.0-green.svg)
+![Project status](https://img.shields.io/badge/version-10.13.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
index 48a01ca369..d66980b6e2 100644
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -33,7 +33,7 @@ type Func func(fl FieldLevel) bool
// validation needs. The return value should be true when validation succeeds.
type FuncCtx func(ctx context.Context, fl FieldLevel) bool
-// wrapFunc wraps noramal Func makes it compatible with FuncCtx
+// wrapFunc wraps normal Func makes it compatible with FuncCtx
func wrapFunc(fn Func) FuncCtx {
if fn == nil {
return nil // be sure not to wrap a bad function.
@@ -73,6 +73,7 @@ var (
"required": hasValue,
"required_if": requiredIf,
"required_unless": requiredUnless,
+ "skip_unless": skipUnless,
"required_with": requiredWith,
"required_with_all": requiredWithAll,
"required_without": requiredWithout,
@@ -928,7 +929,7 @@ func isNe(fl FieldLevel) bool {
return !isEq(fl)
}
-// isNe is the validation function for validating that the field's string value does not equal the
+// isNeIgnoreCase is the validation function for validating that the field's string value does not equal the
// provided param value. The comparison is case-insensitive
func isNeIgnoreCase(fl FieldLevel) bool {
return !isEqIgnoreCase(fl)
@@ -1648,7 +1649,7 @@ func hasValue(fl FieldLevel) bool {
}
}
-// requireCheckField is a func for check field kind
+// requireCheckFieldKind is a func for check field kind
func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
field := fl.Field()
kind := field.Kind()
@@ -1728,10 +1729,10 @@ func excludedIf(fl FieldLevel) bool {
for i := 0; i < len(params); i += 2 {
if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
- return false
+ return true
}
}
- return true
+ return !hasValue(fl)
}
// requiredUnless is the validation function
@@ -1750,6 +1751,21 @@ func requiredUnless(fl FieldLevel) bool {
return hasValue(fl)
}
+// skipUnless is the validation function
+// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
+func skipUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for skip_unless %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
// excludedUnless is the validation function
// The field under validation must not be present or is empty unless all the other specified fields are equal to the value following with the specified field.
func excludedUnless(fl FieldLevel) bool {
@@ -2593,13 +2609,13 @@ func isIso3166Alpha2(fl FieldLevel) bool {
return iso3166_1_alpha2[val]
}
-// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
+// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
func isIso3166Alpha3(fl FieldLevel) bool {
val := fl.Field().String()
return iso3166_1_alpha3[val]
}
-// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
+// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
func isIso3166AlphaNumeric(fl FieldLevel) bool {
field := fl.Field()
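
The validator bump also registers a new skip_unless tag, implemented by the skipUnless function added above. As that function reads, the tagged field is only required when every referenced field equals the given value; otherwise the check is skipped. A rough usage sketch with a hypothetical Profile struct:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Profile is illustrative only: Bio must be non-empty when Kind is "author";
// for any other Kind the validation is skipped.
type Profile struct {
	Kind string
	Bio  string `validate:"skip_unless=Kind author"`
}

func main() {
	v := validator.New()

	fmt.Println(v.Struct(Profile{Kind: "reader"}))                 // <nil>: skipped
	fmt.Println(v.Struct(Profile{Kind: "author"}))                 // error: Bio required
	fmt.Println(v.Struct(Profile{Kind: "author", Bio: "hi there"})) // <nil>
}
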
diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go
index b6c7a90ca7..91b2e0b909 100644
--- a/vendor/github.com/go-playground/validator/v10/country_codes.go
+++ b/vendor/github.com/go-playground/validator/v10/country_codes.go
@@ -158,975 +158,993 @@ var iso3166_1_alpha_numeric = map[int]bool{
800: true, 804: true, 784: true, 826: true, 581: true,
840: true, 858: true, 860: true, 548: true, 862: true,
704: true, 92: true, 850: true, 876: true, 732: true,
- 887: true, 894: true, 716: true, 248: true, 153:true,
+ 887: true, 894: true, 716: true, 248: true, 153: true,
}
var iso3166_2 = map[string]bool{
- "AD-02" : true, "AD-03" : true, "AD-04" : true, "AD-05" : true, "AD-06" : true,
- "AD-07" : true, "AD-08" : true, "AE-AJ" : true, "AE-AZ" : true, "AE-DU" : true,
- "AE-FU" : true, "AE-RK" : true, "AE-SH" : true, "AE-UQ" : true, "AF-BAL" : true,
- "AF-BAM" : true, "AF-BDG" : true, "AF-BDS" : true, "AF-BGL" : true, "AF-DAY" : true,
- "AF-FRA" : true, "AF-FYB" : true, "AF-GHA" : true, "AF-GHO" : true, "AF-HEL" : true,
- "AF-HER" : true, "AF-JOW" : true, "AF-KAB" : true, "AF-KAN" : true, "AF-KAP" : true,
- "AF-KDZ" : true, "AF-KHO" : true, "AF-KNR" : true, "AF-LAG" : true, "AF-LOG" : true,
- "AF-NAN" : true, "AF-NIM" : true, "AF-NUR" : true, "AF-PAN" : true, "AF-PAR" : true,
- "AF-PIA" : true, "AF-PKA" : true, "AF-SAM" : true, "AF-SAR" : true, "AF-TAK" : true,
- "AF-URU" : true, "AF-WAR" : true, "AF-ZAB" : true, "AG-03" : true, "AG-04" : true,
- "AG-05" : true, "AG-06" : true, "AG-07" : true, "AG-08" : true, "AG-10" : true,
- "AG-11" : true, "AL-01" : true, "AL-02" : true, "AL-03" : true, "AL-04" : true,
- "AL-05" : true, "AL-06" : true, "AL-07" : true, "AL-08" : true, "AL-09" : true,
- "AL-10" : true, "AL-11" : true, "AL-12" : true, "AL-BR" : true, "AL-BU" : true,
- "AL-DI" : true, "AL-DL" : true, "AL-DR" : true, "AL-DV" : true, "AL-EL" : true,
- "AL-ER" : true, "AL-FR" : true, "AL-GJ" : true, "AL-GR" : true, "AL-HA" : true,
- "AL-KA" : true, "AL-KB" : true, "AL-KC" : true, "AL-KO" : true, "AL-KR" : true,
- "AL-KU" : true, "AL-LB" : true, "AL-LE" : true, "AL-LU" : true, "AL-MK" : true,
- "AL-MM" : true, "AL-MR" : true, "AL-MT" : true, "AL-PG" : true, "AL-PQ" : true,
- "AL-PR" : true, "AL-PU" : true, "AL-SH" : true, "AL-SK" : true, "AL-SR" : true,
- "AL-TE" : true, "AL-TP" : true, "AL-TR" : true, "AL-VL" : true, "AM-AG" : true,
- "AM-AR" : true, "AM-AV" : true, "AM-ER" : true, "AM-GR" : true, "AM-KT" : true,
- "AM-LO" : true, "AM-SH" : true, "AM-SU" : true, "AM-TV" : true, "AM-VD" : true,
- "AO-BGO" : true, "AO-BGU" : true, "AO-BIE" : true, "AO-CAB" : true, "AO-CCU" : true,
- "AO-CNN" : true, "AO-CNO" : true, "AO-CUS" : true, "AO-HUA" : true, "AO-HUI" : true,
- "AO-LNO" : true, "AO-LSU" : true, "AO-LUA" : true, "AO-MAL" : true, "AO-MOX" : true,
- "AO-NAM" : true, "AO-UIG" : true, "AO-ZAI" : true, "AR-A" : true, "AR-B" : true,
- "AR-C" : true, "AR-D" : true, "AR-E" : true, "AR-G" : true, "AR-H" : true,
- "AR-J" : true, "AR-K" : true, "AR-L" : true, "AR-M" : true, "AR-N" : true,
- "AR-P" : true, "AR-Q" : true, "AR-R" : true, "AR-S" : true, "AR-T" : true,
- "AR-U" : true, "AR-V" : true, "AR-W" : true, "AR-X" : true, "AR-Y" : true,
- "AR-Z" : true, "AT-1" : true, "AT-2" : true, "AT-3" : true, "AT-4" : true,
- "AT-5" : true, "AT-6" : true, "AT-7" : true, "AT-8" : true, "AT-9" : true,
- "AU-ACT" : true, "AU-NSW" : true, "AU-NT" : true, "AU-QLD" : true, "AU-SA" : true,
- "AU-TAS" : true, "AU-VIC" : true, "AU-WA" : true, "AZ-ABS" : true, "AZ-AGA" : true,
- "AZ-AGC" : true, "AZ-AGM" : true, "AZ-AGS" : true, "AZ-AGU" : true, "AZ-AST" : true,
- "AZ-BA" : true, "AZ-BAB" : true, "AZ-BAL" : true, "AZ-BAR" : true, "AZ-BEY" : true,
- "AZ-BIL" : true, "AZ-CAB" : true, "AZ-CAL" : true, "AZ-CUL" : true, "AZ-DAS" : true,
- "AZ-FUZ" : true, "AZ-GA" : true, "AZ-GAD" : true, "AZ-GOR" : true, "AZ-GOY" : true,
- "AZ-GYG" : true, "AZ-HAC" : true, "AZ-IMI" : true, "AZ-ISM" : true, "AZ-KAL" : true,
- "AZ-KAN" : true, "AZ-KUR" : true, "AZ-LA" : true, "AZ-LAC" : true, "AZ-LAN" : true,
- "AZ-LER" : true, "AZ-MAS" : true, "AZ-MI" : true, "AZ-NA" : true, "AZ-NEF" : true,
- "AZ-NV" : true, "AZ-NX" : true, "AZ-OGU" : true, "AZ-ORD" : true, "AZ-QAB" : true,
- "AZ-QAX" : true, "AZ-QAZ" : true, "AZ-QBA" : true, "AZ-QBI" : true, "AZ-QOB" : true,
- "AZ-QUS" : true, "AZ-SA" : true, "AZ-SAB" : true, "AZ-SAD" : true, "AZ-SAH" : true,
- "AZ-SAK" : true, "AZ-SAL" : true, "AZ-SAR" : true, "AZ-SAT" : true, "AZ-SBN" : true,
- "AZ-SIY" : true, "AZ-SKR" : true, "AZ-SM" : true, "AZ-SMI" : true, "AZ-SMX" : true,
- "AZ-SR" : true, "AZ-SUS" : true, "AZ-TAR" : true, "AZ-TOV" : true, "AZ-UCA" : true,
- "AZ-XA" : true, "AZ-XAC" : true, "AZ-XCI" : true, "AZ-XIZ" : true, "AZ-XVD" : true,
- "AZ-YAR" : true, "AZ-YE" : true, "AZ-YEV" : true, "AZ-ZAN" : true, "AZ-ZAQ" : true,
- "AZ-ZAR" : true, "BA-01" : true, "BA-02" : true, "BA-03" : true, "BA-04" : true,
- "BA-05" : true, "BA-06" : true, "BA-07" : true, "BA-08" : true, "BA-09" : true,
- "BA-10" : true, "BA-BIH" : true, "BA-BRC" : true, "BA-SRP" : true, "BB-01" : true,
- "BB-02" : true, "BB-03" : true, "BB-04" : true, "BB-05" : true, "BB-06" : true,
- "BB-07" : true, "BB-08" : true, "BB-09" : true, "BB-10" : true, "BB-11" : true,
- "BD-01" : true, "BD-02" : true, "BD-03" : true, "BD-04" : true, "BD-05" : true,
- "BD-06" : true, "BD-07" : true, "BD-08" : true, "BD-09" : true, "BD-10" : true,
- "BD-11" : true, "BD-12" : true, "BD-13" : true, "BD-14" : true, "BD-15" : true,
- "BD-16" : true, "BD-17" : true, "BD-18" : true, "BD-19" : true, "BD-20" : true,
- "BD-21" : true, "BD-22" : true, "BD-23" : true, "BD-24" : true, "BD-25" : true,
- "BD-26" : true, "BD-27" : true, "BD-28" : true, "BD-29" : true, "BD-30" : true,
- "BD-31" : true, "BD-32" : true, "BD-33" : true, "BD-34" : true, "BD-35" : true,
- "BD-36" : true, "BD-37" : true, "BD-38" : true, "BD-39" : true, "BD-40" : true,
- "BD-41" : true, "BD-42" : true, "BD-43" : true, "BD-44" : true, "BD-45" : true,
- "BD-46" : true, "BD-47" : true, "BD-48" : true, "BD-49" : true, "BD-50" : true,
- "BD-51" : true, "BD-52" : true, "BD-53" : true, "BD-54" : true, "BD-55" : true,
- "BD-56" : true, "BD-57" : true, "BD-58" : true, "BD-59" : true, "BD-60" : true,
- "BD-61" : true, "BD-62" : true, "BD-63" : true, "BD-64" : true, "BD-A" : true,
- "BD-B" : true, "BD-C" : true, "BD-D" : true, "BD-E" : true, "BD-F" : true,
- "BD-G" : true, "BE-BRU" : true, "BE-VAN" : true, "BE-VBR" : true, "BE-VLG" : true,
- "BE-VLI" : true, "BE-VOV" : true, "BE-VWV" : true, "BE-WAL" : true, "BE-WBR" : true,
- "BE-WHT" : true, "BE-WLG" : true, "BE-WLX" : true, "BE-WNA" : true, "BF-01" : true,
- "BF-02" : true, "BF-03" : true, "BF-04" : true, "BF-05" : true, "BF-06" : true,
- "BF-07" : true, "BF-08" : true, "BF-09" : true, "BF-10" : true, "BF-11" : true,
- "BF-12" : true, "BF-13" : true, "BF-BAL" : true, "BF-BAM" : true, "BF-BAN" : true,
- "BF-BAZ" : true, "BF-BGR" : true, "BF-BLG" : true, "BF-BLK" : true, "BF-COM" : true,
- "BF-GAN" : true, "BF-GNA" : true, "BF-GOU" : true, "BF-HOU" : true, "BF-IOB" : true,
- "BF-KAD" : true, "BF-KEN" : true, "BF-KMD" : true, "BF-KMP" : true, "BF-KOP" : true,
- "BF-KOS" : true, "BF-KOT" : true, "BF-KOW" : true, "BF-LER" : true, "BF-LOR" : true,
- "BF-MOU" : true, "BF-NAM" : true, "BF-NAO" : true, "BF-NAY" : true, "BF-NOU" : true,
- "BF-OUB" : true, "BF-OUD" : true, "BF-PAS" : true, "BF-PON" : true, "BF-SEN" : true,
- "BF-SIS" : true, "BF-SMT" : true, "BF-SNG" : true, "BF-SOM" : true, "BF-SOR" : true,
- "BF-TAP" : true, "BF-TUI" : true, "BF-YAG" : true, "BF-YAT" : true, "BF-ZIR" : true,
- "BF-ZON" : true, "BF-ZOU" : true, "BG-01" : true, "BG-02" : true, "BG-03" : true,
- "BG-04" : true, "BG-05" : true, "BG-06" : true, "BG-07" : true, "BG-08" : true,
- "BG-09" : true, "BG-10" : true, "BG-11" : true, "BG-12" : true, "BG-13" : true,
- "BG-14" : true, "BG-15" : true, "BG-16" : true, "BG-17" : true, "BG-18" : true,
- "BG-19" : true, "BG-20" : true, "BG-21" : true, "BG-22" : true, "BG-23" : true,
- "BG-24" : true, "BG-25" : true, "BG-26" : true, "BG-27" : true, "BG-28" : true,
- "BH-13" : true, "BH-14" : true, "BH-15" : true, "BH-16" : true, "BH-17" : true,
- "BI-BB" : true, "BI-BL" : true, "BI-BM" : true, "BI-BR" : true, "BI-CA" : true,
- "BI-CI" : true, "BI-GI" : true, "BI-KI" : true, "BI-KR" : true, "BI-KY" : true,
- "BI-MA" : true, "BI-MU" : true, "BI-MW" : true, "BI-NG" : true, "BI-RT" : true,
- "BI-RY" : true, "BJ-AK" : true, "BJ-AL" : true, "BJ-AQ" : true, "BJ-BO" : true,
- "BJ-CO" : true, "BJ-DO" : true, "BJ-KO" : true, "BJ-LI" : true, "BJ-MO" : true,
- "BJ-OU" : true, "BJ-PL" : true, "BJ-ZO" : true, "BN-BE" : true, "BN-BM" : true,
- "BN-TE" : true, "BN-TU" : true, "BO-B" : true, "BO-C" : true, "BO-H" : true,
- "BO-L" : true, "BO-N" : true, "BO-O" : true, "BO-P" : true, "BO-S" : true,
- "BO-T" : true, "BQ-BO" : true, "BQ-SA" : true, "BQ-SE" : true, "BR-AC" : true,
- "BR-AL" : true, "BR-AM" : true, "BR-AP" : true, "BR-BA" : true, "BR-CE" : true,
- "BR-DF" : true, "BR-ES" : true, "BR-FN" : true, "BR-GO" : true, "BR-MA" : true,
- "BR-MG" : true, "BR-MS" : true, "BR-MT" : true, "BR-PA" : true, "BR-PB" : true,
- "BR-PE" : true, "BR-PI" : true, "BR-PR" : true, "BR-RJ" : true, "BR-RN" : true,
- "BR-RO" : true, "BR-RR" : true, "BR-RS" : true, "BR-SC" : true, "BR-SE" : true,
- "BR-SP" : true, "BR-TO" : true, "BS-AK" : true, "BS-BI" : true, "BS-BP" : true,
- "BS-BY" : true, "BS-CE" : true, "BS-CI" : true, "BS-CK" : true, "BS-CO" : true,
- "BS-CS" : true, "BS-EG" : true, "BS-EX" : true, "BS-FP" : true, "BS-GC" : true,
- "BS-HI" : true, "BS-HT" : true, "BS-IN" : true, "BS-LI" : true, "BS-MC" : true,
- "BS-MG" : true, "BS-MI" : true, "BS-NE" : true, "BS-NO" : true, "BS-NS" : true,
- "BS-RC" : true, "BS-RI" : true, "BS-SA" : true, "BS-SE" : true, "BS-SO" : true,
- "BS-SS" : true, "BS-SW" : true, "BS-WG" : true, "BT-11" : true, "BT-12" : true,
- "BT-13" : true, "BT-14" : true, "BT-15" : true, "BT-21" : true, "BT-22" : true,
- "BT-23" : true, "BT-24" : true, "BT-31" : true, "BT-32" : true, "BT-33" : true,
- "BT-34" : true, "BT-41" : true, "BT-42" : true, "BT-43" : true, "BT-44" : true,
- "BT-45" : true, "BT-GA" : true, "BT-TY" : true, "BW-CE" : true, "BW-GH" : true,
- "BW-KG" : true, "BW-KL" : true, "BW-KW" : true, "BW-NE" : true, "BW-NW" : true,
- "BW-SE" : true, "BW-SO" : true, "BY-BR" : true, "BY-HM" : true, "BY-HO" : true,
- "BY-HR" : true, "BY-MA" : true, "BY-MI" : true, "BY-VI" : true, "BZ-BZ" : true,
- "BZ-CY" : true, "BZ-CZL" : true, "BZ-OW" : true, "BZ-SC" : true, "BZ-TOL" : true,
- "CA-AB" : true, "CA-BC" : true, "CA-MB" : true, "CA-NB" : true, "CA-NL" : true,
- "CA-NS" : true, "CA-NT" : true, "CA-NU" : true, "CA-ON" : true, "CA-PE" : true,
- "CA-QC" : true, "CA-SK" : true, "CA-YT" : true, "CD-BC" : true, "CD-BN" : true,
- "CD-EQ" : true, "CD-KA" : true, "CD-KE" : true, "CD-KN" : true, "CD-KW" : true,
- "CD-MA" : true, "CD-NK" : true, "CD-OR" : true, "CD-SK" : true, "CF-AC" : true,
- "CF-BB" : true, "CF-BGF" : true, "CF-BK" : true, "CF-HK" : true, "CF-HM" : true,
- "CF-HS" : true, "CF-KB" : true, "CF-KG" : true, "CF-LB" : true, "CF-MB" : true,
- "CF-MP" : true, "CF-NM" : true, "CF-OP" : true, "CF-SE" : true, "CF-UK" : true,
- "CF-VK" : true, "CG-11" : true, "CG-12" : true, "CG-13" : true, "CG-14" : true,
- "CG-15" : true, "CG-2" : true, "CG-5" : true, "CG-7" : true, "CG-8" : true,
- "CG-9" : true, "CG-BZV" : true, "CH-AG" : true, "CH-AI" : true, "CH-AR" : true,
- "CH-BE" : true, "CH-BL" : true, "CH-BS" : true, "CH-FR" : true, "CH-GE" : true,
- "CH-GL" : true, "CH-GR" : true, "CH-JU" : true, "CH-LU" : true, "CH-NE" : true,
- "CH-NW" : true, "CH-OW" : true, "CH-SG" : true, "CH-SH" : true, "CH-SO" : true,
- "CH-SZ" : true, "CH-TG" : true, "CH-TI" : true, "CH-UR" : true, "CH-VD" : true,
- "CH-VS" : true, "CH-ZG" : true, "CH-ZH" : true, "CI-01" : true, "CI-02" : true,
- "CI-03" : true, "CI-04" : true, "CI-05" : true, "CI-06" : true, "CI-07" : true,
- "CI-08" : true, "CI-09" : true, "CI-10" : true, "CI-11" : true, "CI-12" : true,
- "CI-13" : true, "CI-14" : true, "CI-15" : true, "CI-16" : true, "CI-17" : true,
- "CI-18" : true, "CI-19" : true, "CL-AI" : true, "CL-AN" : true, "CL-AP" : true,
- "CL-AR" : true, "CL-AT" : true, "CL-BI" : true, "CL-CO" : true, "CL-LI" : true,
- "CL-LL" : true, "CL-LR" : true, "CL-MA" : true, "CL-ML" : true, "CL-RM" : true,
- "CL-TA" : true, "CL-VS" : true, "CM-AD" : true, "CM-CE" : true, "CM-EN" : true,
- "CM-ES" : true, "CM-LT" : true, "CM-NO" : true, "CM-NW" : true, "CM-OU" : true,
- "CM-SU" : true, "CM-SW" : true, "CN-11" : true, "CN-12" : true, "CN-13" : true,
- "CN-14" : true, "CN-15" : true, "CN-21" : true, "CN-22" : true, "CN-23" : true,
- "CN-31" : true, "CN-32" : true, "CN-33" : true, "CN-34" : true, "CN-35" : true,
- "CN-36" : true, "CN-37" : true, "CN-41" : true, "CN-42" : true, "CN-43" : true,
- "CN-44" : true, "CN-45" : true, "CN-46" : true, "CN-50" : true, "CN-51" : true,
- "CN-52" : true, "CN-53" : true, "CN-54" : true, "CN-61" : true, "CN-62" : true,
- "CN-63" : true, "CN-64" : true, "CN-65" : true, "CN-71" : true, "CN-91" : true,
- "CN-92" : true, "CO-AMA" : true, "CO-ANT" : true, "CO-ARA" : true, "CO-ATL" : true,
- "CO-BOL" : true, "CO-BOY" : true, "CO-CAL" : true, "CO-CAQ" : true, "CO-CAS" : true,
- "CO-CAU" : true, "CO-CES" : true, "CO-CHO" : true, "CO-COR" : true, "CO-CUN" : true,
- "CO-DC" : true, "CO-GUA" : true, "CO-GUV" : true, "CO-HUI" : true, "CO-LAG" : true,
- "CO-MAG" : true, "CO-MET" : true, "CO-NAR" : true, "CO-NSA" : true, "CO-PUT" : true,
- "CO-QUI" : true, "CO-RIS" : true, "CO-SAN" : true, "CO-SAP" : true, "CO-SUC" : true,
- "CO-TOL" : true, "CO-VAC" : true, "CO-VAU" : true, "CO-VID" : true, "CR-A" : true,
- "CR-C" : true, "CR-G" : true, "CR-H" : true, "CR-L" : true, "CR-P" : true,
- "CR-SJ" : true, "CU-01" : true, "CU-02" : true, "CU-03" : true, "CU-04" : true,
- "CU-05" : true, "CU-06" : true, "CU-07" : true, "CU-08" : true, "CU-09" : true,
- "CU-10" : true, "CU-11" : true, "CU-12" : true, "CU-13" : true, "CU-14" : true,
- "CU-99" : true, "CV-B" : true, "CV-BR" : true, "CV-BV" : true, "CV-CA" : true,
- "CV-CF" : true, "CV-CR" : true, "CV-MA" : true, "CV-MO" : true, "CV-PA" : true,
- "CV-PN" : true, "CV-PR" : true, "CV-RB" : true, "CV-RG" : true, "CV-RS" : true,
- "CV-S" : true, "CV-SD" : true, "CV-SF" : true, "CV-SL" : true, "CV-SM" : true,
- "CV-SO" : true, "CV-SS" : true, "CV-SV" : true, "CV-TA" : true, "CV-TS" : true,
- "CY-01" : true, "CY-02" : true, "CY-03" : true, "CY-04" : true, "CY-05" : true,
- "CY-06" : true, "CZ-10" : true, "CZ-101" : true, "CZ-102" : true, "CZ-103" : true,
- "CZ-104" : true, "CZ-105" : true, "CZ-106" : true, "CZ-107" : true, "CZ-108" : true,
- "CZ-109" : true, "CZ-110" : true, "CZ-111" : true, "CZ-112" : true, "CZ-113" : true,
- "CZ-114" : true, "CZ-115" : true, "CZ-116" : true, "CZ-117" : true, "CZ-118" : true,
- "CZ-119" : true, "CZ-120" : true, "CZ-121" : true, "CZ-122" : true, "CZ-20" : true,
- "CZ-201" : true, "CZ-202" : true, "CZ-203" : true, "CZ-204" : true, "CZ-205" : true,
- "CZ-206" : true, "CZ-207" : true, "CZ-208" : true, "CZ-209" : true, "CZ-20A" : true,
- "CZ-20B" : true, "CZ-20C" : true, "CZ-31" : true, "CZ-311" : true, "CZ-312" : true,
- "CZ-313" : true, "CZ-314" : true, "CZ-315" : true, "CZ-316" : true, "CZ-317" : true,
- "CZ-32" : true, "CZ-321" : true, "CZ-322" : true, "CZ-323" : true, "CZ-324" : true,
- "CZ-325" : true, "CZ-326" : true, "CZ-327" : true, "CZ-41" : true, "CZ-411" : true,
- "CZ-412" : true, "CZ-413" : true, "CZ-42" : true, "CZ-421" : true, "CZ-422" : true,
- "CZ-423" : true, "CZ-424" : true, "CZ-425" : true, "CZ-426" : true, "CZ-427" : true,
- "CZ-51" : true, "CZ-511" : true, "CZ-512" : true, "CZ-513" : true, "CZ-514" : true,
- "CZ-52" : true, "CZ-521" : true, "CZ-522" : true, "CZ-523" : true, "CZ-524" : true,
- "CZ-525" : true, "CZ-53" : true, "CZ-531" : true, "CZ-532" : true, "CZ-533" : true,
- "CZ-534" : true, "CZ-63" : true, "CZ-631" : true, "CZ-632" : true, "CZ-633" : true,
- "CZ-634" : true, "CZ-635" : true, "CZ-64" : true, "CZ-641" : true, "CZ-642" : true,
- "CZ-643" : true, "CZ-644" : true, "CZ-645" : true, "CZ-646" : true, "CZ-647" : true,
- "CZ-71" : true, "CZ-711" : true, "CZ-712" : true, "CZ-713" : true, "CZ-714" : true,
- "CZ-715" : true, "CZ-72" : true, "CZ-721" : true, "CZ-722" : true, "CZ-723" : true,
- "CZ-724" : true, "CZ-80" : true, "CZ-801" : true, "CZ-802" : true, "CZ-803" : true,
- "CZ-804" : true, "CZ-805" : true, "CZ-806" : true, "DE-BB" : true, "DE-BE" : true,
- "DE-BW" : true, "DE-BY" : true, "DE-HB" : true, "DE-HE" : true, "DE-HH" : true,
- "DE-MV" : true, "DE-NI" : true, "DE-NW" : true, "DE-RP" : true, "DE-SH" : true,
- "DE-SL" : true, "DE-SN" : true, "DE-ST" : true, "DE-TH" : true, "DJ-AR" : true,
- "DJ-AS" : true, "DJ-DI" : true, "DJ-DJ" : true, "DJ-OB" : true, "DJ-TA" : true,
- "DK-81" : true, "DK-82" : true, "DK-83" : true, "DK-84" : true, "DK-85" : true,
- "DM-01" : true, "DM-02" : true, "DM-03" : true, "DM-04" : true, "DM-05" : true,
- "DM-06" : true, "DM-07" : true, "DM-08" : true, "DM-09" : true, "DM-10" : true,
- "DO-01" : true, "DO-02" : true, "DO-03" : true, "DO-04" : true, "DO-05" : true,
- "DO-06" : true, "DO-07" : true, "DO-08" : true, "DO-09" : true, "DO-10" : true,
- "DO-11" : true, "DO-12" : true, "DO-13" : true, "DO-14" : true, "DO-15" : true,
- "DO-16" : true, "DO-17" : true, "DO-18" : true, "DO-19" : true, "DO-20" : true,
- "DO-21" : true, "DO-22" : true, "DO-23" : true, "DO-24" : true, "DO-25" : true,
- "DO-26" : true, "DO-27" : true, "DO-28" : true, "DO-29" : true, "DO-30" : true,
- "DZ-01" : true, "DZ-02" : true, "DZ-03" : true, "DZ-04" : true, "DZ-05" : true,
- "DZ-06" : true, "DZ-07" : true, "DZ-08" : true, "DZ-09" : true, "DZ-10" : true,
- "DZ-11" : true, "DZ-12" : true, "DZ-13" : true, "DZ-14" : true, "DZ-15" : true,
- "DZ-16" : true, "DZ-17" : true, "DZ-18" : true, "DZ-19" : true, "DZ-20" : true,
- "DZ-21" : true, "DZ-22" : true, "DZ-23" : true, "DZ-24" : true, "DZ-25" : true,
- "DZ-26" : true, "DZ-27" : true, "DZ-28" : true, "DZ-29" : true, "DZ-30" : true,
- "DZ-31" : true, "DZ-32" : true, "DZ-33" : true, "DZ-34" : true, "DZ-35" : true,
- "DZ-36" : true, "DZ-37" : true, "DZ-38" : true, "DZ-39" : true, "DZ-40" : true,
- "DZ-41" : true, "DZ-42" : true, "DZ-43" : true, "DZ-44" : true, "DZ-45" : true,
- "DZ-46" : true, "DZ-47" : true, "DZ-48" : true, "EC-A" : true, "EC-B" : true,
- "EC-C" : true, "EC-D" : true, "EC-E" : true, "EC-F" : true, "EC-G" : true,
- "EC-H" : true, "EC-I" : true, "EC-L" : true, "EC-M" : true, "EC-N" : true,
- "EC-O" : true, "EC-P" : true, "EC-R" : true, "EC-S" : true, "EC-SD" : true,
- "EC-SE" : true, "EC-T" : true, "EC-U" : true, "EC-W" : true, "EC-X" : true,
- "EC-Y" : true, "EC-Z" : true, "EE-37" : true, "EE-39" : true, "EE-44" : true,
- "EE-49" : true, "EE-51" : true, "EE-57" : true, "EE-59" : true, "EE-65" : true,
- "EE-67" : true, "EE-70" : true, "EE-74" : true, "EE-78" : true, "EE-82" : true,
- "EE-84" : true, "EE-86" : true, "EG-ALX" : true, "EG-ASN" : true, "EG-AST" : true,
- "EG-BA" : true, "EG-BH" : true, "EG-BNS" : true, "EG-C" : true, "EG-DK" : true,
- "EG-DT" : true, "EG-FYM" : true, "EG-GH" : true, "EG-GZ" : true, "EG-HU" : true,
- "EG-IS" : true, "EG-JS" : true, "EG-KB" : true, "EG-KFS" : true, "EG-KN" : true,
- "EG-MN" : true, "EG-MNF" : true, "EG-MT" : true, "EG-PTS" : true, "EG-SHG" : true,
- "EG-SHR" : true, "EG-SIN" : true, "EG-SU" : true, "EG-SUZ" : true, "EG-WAD" : true,
- "ER-AN" : true, "ER-DK" : true, "ER-DU" : true, "ER-GB" : true, "ER-MA" : true,
- "ER-SK" : true, "ES-A" : true, "ES-AB" : true, "ES-AL" : true, "ES-AN" : true,
- "ES-AR" : true, "ES-AS" : true, "ES-AV" : true, "ES-B" : true, "ES-BA" : true,
- "ES-BI" : true, "ES-BU" : true, "ES-C" : true, "ES-CA" : true, "ES-CB" : true,
- "ES-CC" : true, "ES-CE" : true, "ES-CL" : true, "ES-CM" : true, "ES-CN" : true,
- "ES-CO" : true, "ES-CR" : true, "ES-CS" : true, "ES-CT" : true, "ES-CU" : true,
- "ES-EX" : true, "ES-GA" : true, "ES-GC" : true, "ES-GI" : true, "ES-GR" : true,
- "ES-GU" : true, "ES-H" : true, "ES-HU" : true, "ES-IB" : true, "ES-J" : true,
- "ES-L" : true, "ES-LE" : true, "ES-LO" : true, "ES-LU" : true, "ES-M" : true,
- "ES-MA" : true, "ES-MC" : true, "ES-MD" : true, "ES-ML" : true, "ES-MU" : true,
- "ES-NA" : true, "ES-NC" : true, "ES-O" : true, "ES-OR" : true, "ES-P" : true,
- "ES-PM" : true, "ES-PO" : true, "ES-PV" : true, "ES-RI" : true, "ES-S" : true,
- "ES-SA" : true, "ES-SE" : true, "ES-SG" : true, "ES-SO" : true, "ES-SS" : true,
- "ES-T" : true, "ES-TE" : true, "ES-TF" : true, "ES-TO" : true, "ES-V" : true,
- "ES-VA" : true, "ES-VC" : true, "ES-VI" : true, "ES-Z" : true, "ES-ZA" : true,
- "ET-AA" : true, "ET-AF" : true, "ET-AM" : true, "ET-BE" : true, "ET-DD" : true,
- "ET-GA" : true, "ET-HA" : true, "ET-OR" : true, "ET-SN" : true, "ET-SO" : true,
- "ET-TI" : true, "FI-01" : true, "FI-02" : true, "FI-03" : true, "FI-04" : true,
- "FI-05" : true, "FI-06" : true, "FI-07" : true, "FI-08" : true, "FI-09" : true,
- "FI-10" : true, "FI-11" : true, "FI-12" : true, "FI-13" : true, "FI-14" : true,
- "FI-15" : true, "FI-16" : true, "FI-17" : true, "FI-18" : true, "FI-19" : true,
- "FJ-C" : true, "FJ-E" : true, "FJ-N" : true, "FJ-R" : true, "FJ-W" : true,
- "FM-KSA" : true, "FM-PNI" : true, "FM-TRK" : true, "FM-YAP" : true, "FR-01" : true,
- "FR-02" : true, "FR-03" : true, "FR-04" : true, "FR-05" : true, "FR-06" : true,
- "FR-07" : true, "FR-08" : true, "FR-09" : true, "FR-10" : true, "FR-11" : true,
- "FR-12" : true, "FR-13" : true, "FR-14" : true, "FR-15" : true, "FR-16" : true,
- "FR-17" : true, "FR-18" : true, "FR-19" : true, "FR-21" : true, "FR-22" : true,
- "FR-23" : true, "FR-24" : true, "FR-25" : true, "FR-26" : true, "FR-27" : true,
- "FR-28" : true, "FR-29" : true, "FR-2A" : true, "FR-2B" : true, "FR-30" : true,
- "FR-31" : true, "FR-32" : true, "FR-33" : true, "FR-34" : true, "FR-35" : true,
- "FR-36" : true, "FR-37" : true, "FR-38" : true, "FR-39" : true, "FR-40" : true,
- "FR-41" : true, "FR-42" : true, "FR-43" : true, "FR-44" : true, "FR-45" : true,
- "FR-46" : true, "FR-47" : true, "FR-48" : true, "FR-49" : true, "FR-50" : true,
- "FR-51" : true, "FR-52" : true, "FR-53" : true, "FR-54" : true, "FR-55" : true,
- "FR-56" : true, "FR-57" : true, "FR-58" : true, "FR-59" : true, "FR-60" : true,
- "FR-61" : true, "FR-62" : true, "FR-63" : true, "FR-64" : true, "FR-65" : true,
- "FR-66" : true, "FR-67" : true, "FR-68" : true, "FR-69" : true, "FR-70" : true,
- "FR-71" : true, "FR-72" : true, "FR-73" : true, "FR-74" : true, "FR-75" : true,
- "FR-76" : true, "FR-77" : true, "FR-78" : true, "FR-79" : true, "FR-80" : true,
- "FR-81" : true, "FR-82" : true, "FR-83" : true, "FR-84" : true, "FR-85" : true,
- "FR-86" : true, "FR-87" : true, "FR-88" : true, "FR-89" : true, "FR-90" : true,
- "FR-91" : true, "FR-92" : true, "FR-93" : true, "FR-94" : true, "FR-95" : true,
- "FR-ARA" : true, "FR-BFC" : true, "FR-BL" : true, "FR-BRE" : true, "FR-COR" : true,
- "FR-CP" : true, "FR-CVL" : true, "FR-GES" : true, "FR-GF" : true, "FR-GP" : true,
- "FR-GUA" : true, "FR-HDF" : true, "FR-IDF" : true, "FR-LRE" : true, "FR-MAY" : true,
- "FR-MF" : true, "FR-MQ" : true, "FR-NAQ" : true, "FR-NC" : true, "FR-NOR" : true,
- "FR-OCC" : true, "FR-PAC" : true, "FR-PDL" : true, "FR-PF" : true, "FR-PM" : true,
- "FR-RE" : true, "FR-TF" : true, "FR-WF" : true, "FR-YT" : true, "GA-1" : true,
- "GA-2" : true, "GA-3" : true, "GA-4" : true, "GA-5" : true, "GA-6" : true,
- "GA-7" : true, "GA-8" : true, "GA-9" : true, "GB-ABC" : true, "GB-ABD" : true,
- "GB-ABE" : true, "GB-AGB" : true, "GB-AGY" : true, "GB-AND" : true, "GB-ANN" : true,
- "GB-ANS" : true, "GB-BAS" : true, "GB-BBD" : true, "GB-BDF" : true, "GB-BDG" : true,
- "GB-BEN" : true, "GB-BEX" : true, "GB-BFS" : true, "GB-BGE" : true, "GB-BGW" : true,
- "GB-BIR" : true, "GB-BKM" : true, "GB-BMH" : true, "GB-BNE" : true, "GB-BNH" : true,
- "GB-BNS" : true, "GB-BOL" : true, "GB-BPL" : true, "GB-BRC" : true, "GB-BRD" : true,
- "GB-BRY" : true, "GB-BST" : true, "GB-BUR" : true, "GB-CAM" : true, "GB-CAY" : true,
- "GB-CBF" : true, "GB-CCG" : true, "GB-CGN" : true, "GB-CHE" : true, "GB-CHW" : true,
- "GB-CLD" : true, "GB-CLK" : true, "GB-CMA" : true, "GB-CMD" : true, "GB-CMN" : true,
- "GB-CON" : true, "GB-COV" : true, "GB-CRF" : true, "GB-CRY" : true, "GB-CWY" : true,
- "GB-DAL" : true, "GB-DBY" : true, "GB-DEN" : true, "GB-DER" : true, "GB-DEV" : true,
- "GB-DGY" : true, "GB-DNC" : true, "GB-DND" : true, "GB-DOR" : true, "GB-DRS" : true,
- "GB-DUD" : true, "GB-DUR" : true, "GB-EAL" : true, "GB-EAW" : true, "GB-EAY" : true,
- "GB-EDH" : true, "GB-EDU" : true, "GB-ELN" : true, "GB-ELS" : true, "GB-ENF" : true,
- "GB-ENG" : true, "GB-ERW" : true, "GB-ERY" : true, "GB-ESS" : true, "GB-ESX" : true,
- "GB-FAL" : true, "GB-FIF" : true, "GB-FLN" : true, "GB-FMO" : true, "GB-GAT" : true,
- "GB-GBN" : true, "GB-GLG" : true, "GB-GLS" : true, "GB-GRE" : true, "GB-GWN" : true,
- "GB-HAL" : true, "GB-HAM" : true, "GB-HAV" : true, "GB-HCK" : true, "GB-HEF" : true,
- "GB-HIL" : true, "GB-HLD" : true, "GB-HMF" : true, "GB-HNS" : true, "GB-HPL" : true,
- "GB-HRT" : true, "GB-HRW" : true, "GB-HRY" : true, "GB-IOS" : true, "GB-IOW" : true,
- "GB-ISL" : true, "GB-IVC" : true, "GB-KEC" : true, "GB-KEN" : true, "GB-KHL" : true,
- "GB-KIR" : true, "GB-KTT" : true, "GB-KWL" : true, "GB-LAN" : true, "GB-LBC" : true,
- "GB-LBH" : true, "GB-LCE" : true, "GB-LDS" : true, "GB-LEC" : true, "GB-LEW" : true,
- "GB-LIN" : true, "GB-LIV" : true, "GB-LND" : true, "GB-LUT" : true, "GB-MAN" : true,
- "GB-MDB" : true, "GB-MDW" : true, "GB-MEA" : true, "GB-MIK" : true, "GD-01" : true,
- "GB-MLN" : true, "GB-MON" : true, "GB-MRT" : true, "GB-MRY" : true, "GB-MTY" : true,
- "GB-MUL" : true, "GB-NAY" : true, "GB-NBL" : true, "GB-NEL" : true, "GB-NET" : true,
- "GB-NFK" : true, "GB-NGM" : true, "GB-NIR" : true, "GB-NLK" : true, "GB-NLN" : true,
- "GB-NMD" : true, "GB-NSM" : true, "GB-NTH" : true, "GB-NTL" : true, "GB-NTT" : true,
- "GB-NTY" : true, "GB-NWM" : true, "GB-NWP" : true, "GB-NYK" : true, "GB-OLD" : true,
- "GB-ORK" : true, "GB-OXF" : true, "GB-PEM" : true, "GB-PKN" : true, "GB-PLY" : true,
- "GB-POL" : true, "GB-POR" : true, "GB-POW" : true, "GB-PTE" : true, "GB-RCC" : true,
- "GB-RCH" : true, "GB-RCT" : true, "GB-RDB" : true, "GB-RDG" : true, "GB-RFW" : true,
- "GB-RIC" : true, "GB-ROT" : true, "GB-RUT" : true, "GB-SAW" : true, "GB-SAY" : true,
- "GB-SCB" : true, "GB-SCT" : true, "GB-SFK" : true, "GB-SFT" : true, "GB-SGC" : true,
- "GB-SHF" : true, "GB-SHN" : true, "GB-SHR" : true, "GB-SKP" : true, "GB-SLF" : true,
- "GB-SLG" : true, "GB-SLK" : true, "GB-SND" : true, "GB-SOL" : true, "GB-SOM" : true,
- "GB-SOS" : true, "GB-SRY" : true, "GB-STE" : true, "GB-STG" : true, "GB-STH" : true,
- "GB-STN" : true, "GB-STS" : true, "GB-STT" : true, "GB-STY" : true, "GB-SWA" : true,
- "GB-SWD" : true, "GB-SWK" : true, "GB-TAM" : true, "GB-TFW" : true, "GB-THR" : true,
- "GB-TOB" : true, "GB-TOF" : true, "GB-TRF" : true, "GB-TWH" : true, "GB-UKM" : true,
- "GB-VGL" : true, "GB-WAR" : true, "GB-WBK" : true, "GB-WDU" : true, "GB-WFT" : true,
- "GB-WGN" : true, "GB-WIL" : true, "GB-WKF" : true, "GB-WLL" : true, "GB-WLN" : true,
- "GB-WLS" : true, "GB-WLV" : true, "GB-WND" : true, "GB-WNM" : true, "GB-WOK" : true,
- "GB-WOR" : true, "GB-WRL" : true, "GB-WRT" : true, "GB-WRX" : true, "GB-WSM" : true,
- "GB-WSX" : true, "GB-YOR" : true, "GB-ZET" : true, "GD-02" : true, "GD-03" : true,
- "GD-04" : true, "GD-05" : true, "GD-06" : true, "GD-10" : true, "GE-AB" : true,
- "GE-AJ" : true, "GE-GU" : true, "GE-IM" : true, "GE-KA" : true, "GE-KK" : true,
- "GE-MM" : true, "GE-RL" : true, "GE-SJ" : true, "GE-SK" : true, "GE-SZ" : true,
- "GE-TB" : true, "GH-AA" : true, "GH-AH" : true, "GH-BA" : true, "GH-CP" : true,
- "GH-EP" : true, "GH-NP" : true, "GH-TV" : true, "GH-UE" : true, "GH-UW" : true,
- "GH-WP" : true, "GL-KU" : true, "GL-QA" : true, "GL-QE" : true, "GL-SM" : true,
- "GM-B" : true, "GM-L" : true, "GM-M" : true, "GM-N" : true, "GM-U" : true,
- "GM-W" : true, "GN-B" : true, "GN-BE" : true, "GN-BF" : true, "GN-BK" : true,
- "GN-C" : true, "GN-CO" : true, "GN-D" : true, "GN-DB" : true, "GN-DI" : true,
- "GN-DL" : true, "GN-DU" : true, "GN-F" : true, "GN-FA" : true, "GN-FO" : true,
- "GN-FR" : true, "GN-GA" : true, "GN-GU" : true, "GN-K" : true, "GN-KA" : true,
- "GN-KB" : true, "GN-KD" : true, "GN-KE" : true, "GN-KN" : true, "GN-KO" : true,
- "GN-KS" : true, "GN-L" : true, "GN-LA" : true, "GN-LE" : true, "GN-LO" : true,
- "GN-M" : true, "GN-MC" : true, "GN-MD" : true, "GN-ML" : true, "GN-MM" : true,
- "GN-N" : true, "GN-NZ" : true, "GN-PI" : true, "GN-SI" : true, "GN-TE" : true,
- "GN-TO" : true, "GN-YO" : true, "GQ-AN" : true, "GQ-BN" : true, "GQ-BS" : true,
- "GQ-C" : true, "GQ-CS" : true, "GQ-I" : true, "GQ-KN" : true, "GQ-LI" : true,
- "GQ-WN" : true, "GR-01" : true, "GR-03" : true, "GR-04" : true, "GR-05" : true,
- "GR-06" : true, "GR-07" : true, "GR-11" : true, "GR-12" : true, "GR-13" : true,
- "GR-14" : true, "GR-15" : true, "GR-16" : true, "GR-17" : true, "GR-21" : true,
- "GR-22" : true, "GR-23" : true, "GR-24" : true, "GR-31" : true, "GR-32" : true,
- "GR-33" : true, "GR-34" : true, "GR-41" : true, "GR-42" : true, "GR-43" : true,
- "GR-44" : true, "GR-51" : true, "GR-52" : true, "GR-53" : true, "GR-54" : true,
- "GR-55" : true, "GR-56" : true, "GR-57" : true, "GR-58" : true, "GR-59" : true,
- "GR-61" : true, "GR-62" : true, "GR-63" : true, "GR-64" : true, "GR-69" : true,
- "GR-71" : true, "GR-72" : true, "GR-73" : true, "GR-81" : true, "GR-82" : true,
- "GR-83" : true, "GR-84" : true, "GR-85" : true, "GR-91" : true, "GR-92" : true,
- "GR-93" : true, "GR-94" : true, "GR-A" : true, "GR-A1" : true, "GR-B" : true,
- "GR-C" : true, "GR-D" : true, "GR-E" : true, "GR-F" : true, "GR-G" : true,
- "GR-H" : true, "GR-I" : true, "GR-J" : true, "GR-K" : true, "GR-L" : true,
- "GR-M" : true, "GT-AV" : true, "GT-BV" : true, "GT-CM" : true, "GT-CQ" : true,
- "GT-ES" : true, "GT-GU" : true, "GT-HU" : true, "GT-IZ" : true, "GT-JA" : true,
- "GT-JU" : true, "GT-PE" : true, "GT-PR" : true, "GT-QC" : true, "GT-QZ" : true,
- "GT-RE" : true, "GT-SA" : true, "GT-SM" : true, "GT-SO" : true, "GT-SR" : true,
- "GT-SU" : true, "GT-TO" : true, "GT-ZA" : true, "GW-BA" : true, "GW-BL" : true,
- "GW-BM" : true, "GW-BS" : true, "GW-CA" : true, "GW-GA" : true, "GW-L" : true,
- "GW-N" : true, "GW-OI" : true, "GW-QU" : true, "GW-S" : true, "GW-TO" : true,
- "GY-BA" : true, "GY-CU" : true, "GY-DE" : true, "GY-EB" : true, "GY-ES" : true,
- "GY-MA" : true, "GY-PM" : true, "GY-PT" : true, "GY-UD" : true, "GY-UT" : true,
- "HN-AT" : true, "HN-CH" : true, "HN-CL" : true, "HN-CM" : true, "HN-CP" : true,
- "HN-CR" : true, "HN-EP" : true, "HN-FM" : true, "HN-GD" : true, "HN-IB" : true,
- "HN-IN" : true, "HN-LE" : true, "HN-LP" : true, "HN-OC" : true, "HN-OL" : true,
- "HN-SB" : true, "HN-VA" : true, "HN-YO" : true, "HR-01" : true, "HR-02" : true,
- "HR-03" : true, "HR-04" : true, "HR-05" : true, "HR-06" : true, "HR-07" : true,
- "HR-08" : true, "HR-09" : true, "HR-10" : true, "HR-11" : true, "HR-12" : true,
- "HR-13" : true, "HR-14" : true, "HR-15" : true, "HR-16" : true, "HR-17" : true,
- "HR-18" : true, "HR-19" : true, "HR-20" : true, "HR-21" : true, "HT-AR" : true,
- "HT-CE" : true, "HT-GA" : true, "HT-ND" : true, "HT-NE" : true, "HT-NO" : true,
- "HT-OU" : true, "HT-SD" : true, "HT-SE" : true, "HU-BA" : true, "HU-BC" : true,
- "HU-BE" : true, "HU-BK" : true, "HU-BU" : true, "HU-BZ" : true, "HU-CS" : true,
- "HU-DE" : true, "HU-DU" : true, "HU-EG" : true, "HU-ER" : true, "HU-FE" : true,
- "HU-GS" : true, "HU-GY" : true, "HU-HB" : true, "HU-HE" : true, "HU-HV" : true,
- "HU-JN" : true, "HU-KE" : true, "HU-KM" : true, "HU-KV" : true, "HU-MI" : true,
- "HU-NK" : true, "HU-NO" : true, "HU-NY" : true, "HU-PE" : true, "HU-PS" : true,
- "HU-SD" : true, "HU-SF" : true, "HU-SH" : true, "HU-SK" : true, "HU-SN" : true,
- "HU-SO" : true, "HU-SS" : true, "HU-ST" : true, "HU-SZ" : true, "HU-TB" : true,
- "HU-TO" : true, "HU-VA" : true, "HU-VE" : true, "HU-VM" : true, "HU-ZA" : true,
- "HU-ZE" : true, "ID-AC" : true, "ID-BA" : true, "ID-BB" : true, "ID-BE" : true,
- "ID-BT" : true, "ID-GO" : true, "ID-IJ" : true, "ID-JA" : true, "ID-JB" : true,
- "ID-JI" : true, "ID-JK" : true, "ID-JT" : true, "ID-JW" : true, "ID-KA" : true,
- "ID-KB" : true, "ID-KI" : true, "ID-KR" : true, "ID-KS" : true, "ID-KT" : true,
- "ID-LA" : true, "ID-MA" : true, "ID-ML" : true, "ID-MU" : true, "ID-NB" : true,
- "ID-NT" : true, "ID-NU" : true, "ID-PA" : true, "ID-PB" : true, "ID-RI" : true,
- "ID-SA" : true, "ID-SB" : true, "ID-SG" : true, "ID-SL" : true, "ID-SM" : true,
- "ID-SN" : true, "ID-SR" : true, "ID-SS" : true, "ID-ST" : true, "ID-SU" : true,
- "ID-YO" : true, "IE-C" : true, "IE-CE" : true, "IE-CN" : true, "IE-CO" : true,
- "IE-CW" : true, "IE-D" : true, "IE-DL" : true, "IE-G" : true, "IE-KE" : true,
- "IE-KK" : true, "IE-KY" : true, "IE-L" : true, "IE-LD" : true, "IE-LH" : true,
- "IE-LK" : true, "IE-LM" : true, "IE-LS" : true, "IE-M" : true, "IE-MH" : true,
- "IE-MN" : true, "IE-MO" : true, "IE-OY" : true, "IE-RN" : true, "IE-SO" : true,
- "IE-TA" : true, "IE-U" : true, "IE-WD" : true, "IE-WH" : true, "IE-WW" : true,
- "IE-WX" : true, "IL-D" : true, "IL-HA" : true, "IL-JM" : true, "IL-M" : true,
- "IL-TA" : true, "IL-Z" : true, "IN-AN" : true, "IN-AP" : true, "IN-AR" : true,
- "IN-AS" : true, "IN-BR" : true, "IN-CH" : true, "IN-CT" : true, "IN-DD" : true,
- "IN-DL" : true, "IN-DN" : true, "IN-GA" : true, "IN-GJ" : true, "IN-HP" : true,
- "IN-HR" : true, "IN-JH" : true, "IN-JK" : true, "IN-KA" : true, "IN-KL" : true,
- "IN-LD" : true, "IN-MH" : true, "IN-ML" : true, "IN-MN" : true, "IN-MP" : true,
- "IN-MZ" : true, "IN-NL" : true, "IN-OR" : true, "IN-PB" : true, "IN-PY" : true,
- "IN-RJ" : true, "IN-SK" : true, "IN-TN" : true, "IN-TR" : true, "IN-UP" : true,
- "IN-UT" : true, "IN-WB" : true, "IQ-AN" : true, "IQ-AR" : true, "IQ-BA" : true,
- "IQ-BB" : true, "IQ-BG" : true, "IQ-DA" : true, "IQ-DI" : true, "IQ-DQ" : true,
- "IQ-KA" : true, "IQ-MA" : true, "IQ-MU" : true, "IQ-NA" : true, "IQ-NI" : true,
- "IQ-QA" : true, "IQ-SD" : true, "IQ-SW" : true, "IQ-TS" : true, "IQ-WA" : true,
- "IR-01" : true, "IR-02" : true, "IR-03" : true, "IR-04" : true, "IR-05" : true,
- "IR-06" : true, "IR-07" : true, "IR-08" : true, "IR-10" : true, "IR-11" : true,
- "IR-12" : true, "IR-13" : true, "IR-14" : true, "IR-15" : true, "IR-16" : true,
- "IR-17" : true, "IR-18" : true, "IR-19" : true, "IR-20" : true, "IR-21" : true,
- "IR-22" : true, "IR-23" : true, "IR-24" : true, "IR-25" : true, "IR-26" : true,
- "IR-27" : true, "IR-28" : true, "IR-29" : true, "IR-30" : true, "IR-31" : true,
- "IS-0" : true, "IS-1" : true, "IS-2" : true, "IS-3" : true, "IS-4" : true,
- "IS-5" : true, "IS-6" : true, "IS-7" : true, "IS-8" : true, "IT-21" : true,
- "IT-23" : true, "IT-25" : true, "IT-32" : true, "IT-34" : true, "IT-36" : true,
- "IT-42" : true, "IT-45" : true, "IT-52" : true, "IT-55" : true, "IT-57" : true,
- "IT-62" : true, "IT-65" : true, "IT-67" : true, "IT-72" : true, "IT-75" : true,
- "IT-77" : true, "IT-78" : true, "IT-82" : true, "IT-88" : true, "IT-AG" : true,
- "IT-AL" : true, "IT-AN" : true, "IT-AO" : true, "IT-AP" : true, "IT-AQ" : true,
- "IT-AR" : true, "IT-AT" : true, "IT-AV" : true, "IT-BA" : true, "IT-BG" : true,
- "IT-BI" : true, "IT-BL" : true, "IT-BN" : true, "IT-BO" : true, "IT-BR" : true,
- "IT-BS" : true, "IT-BT" : true, "IT-BZ" : true, "IT-CA" : true, "IT-CB" : true,
- "IT-CE" : true, "IT-CH" : true, "IT-CI" : true, "IT-CL" : true, "IT-CN" : true,
- "IT-CO" : true, "IT-CR" : true, "IT-CS" : true, "IT-CT" : true, "IT-CZ" : true,
- "IT-EN" : true, "IT-FC" : true, "IT-FE" : true, "IT-FG" : true, "IT-FI" : true,
- "IT-FM" : true, "IT-FR" : true, "IT-GE" : true, "IT-GO" : true, "IT-GR" : true,
- "IT-IM" : true, "IT-IS" : true, "IT-KR" : true, "IT-LC" : true, "IT-LE" : true,
- "IT-LI" : true, "IT-LO" : true, "IT-LT" : true, "IT-LU" : true, "IT-MB" : true,
- "IT-MC" : true, "IT-ME" : true, "IT-MI" : true, "IT-MN" : true, "IT-MO" : true,
- "IT-MS" : true, "IT-MT" : true, "IT-NA" : true, "IT-NO" : true, "IT-NU" : true,
- "IT-OG" : true, "IT-OR" : true, "IT-OT" : true, "IT-PA" : true, "IT-PC" : true,
- "IT-PD" : true, "IT-PE" : true, "IT-PG" : true, "IT-PI" : true, "IT-PN" : true,
- "IT-PO" : true, "IT-PR" : true, "IT-PT" : true, "IT-PU" : true, "IT-PV" : true,
- "IT-PZ" : true, "IT-RA" : true, "IT-RC" : true, "IT-RE" : true, "IT-RG" : true,
- "IT-RI" : true, "IT-RM" : true, "IT-RN" : true, "IT-RO" : true, "IT-SA" : true,
- "IT-SI" : true, "IT-SO" : true, "IT-SP" : true, "IT-SR" : true, "IT-SS" : true,
- "IT-SV" : true, "IT-TA" : true, "IT-TE" : true, "IT-TN" : true, "IT-TO" : true,
- "IT-TP" : true, "IT-TR" : true, "IT-TS" : true, "IT-TV" : true, "IT-UD" : true,
- "IT-VA" : true, "IT-VB" : true, "IT-VC" : true, "IT-VE" : true, "IT-VI" : true,
- "IT-VR" : true, "IT-VS" : true, "IT-VT" : true, "IT-VV" : true, "JM-01" : true,
- "JM-02" : true, "JM-03" : true, "JM-04" : true, "JM-05" : true, "JM-06" : true,
- "JM-07" : true, "JM-08" : true, "JM-09" : true, "JM-10" : true, "JM-11" : true,
- "JM-12" : true, "JM-13" : true, "JM-14" : true, "JO-AJ" : true, "JO-AM" : true,
- "JO-AQ" : true, "JO-AT" : true, "JO-AZ" : true, "JO-BA" : true, "JO-IR" : true,
- "JO-JA" : true, "JO-KA" : true, "JO-MA" : true, "JO-MD" : true, "JO-MN" : true,
- "JP-01" : true, "JP-02" : true, "JP-03" : true, "JP-04" : true, "JP-05" : true,
- "JP-06" : true, "JP-07" : true, "JP-08" : true, "JP-09" : true, "JP-10" : true,
- "JP-11" : true, "JP-12" : true, "JP-13" : true, "JP-14" : true, "JP-15" : true,
- "JP-16" : true, "JP-17" : true, "JP-18" : true, "JP-19" : true, "JP-20" : true,
- "JP-21" : true, "JP-22" : true, "JP-23" : true, "JP-24" : true, "JP-25" : true,
- "JP-26" : true, "JP-27" : true, "JP-28" : true, "JP-29" : true, "JP-30" : true,
- "JP-31" : true, "JP-32" : true, "JP-33" : true, "JP-34" : true, "JP-35" : true,
- "JP-36" : true, "JP-37" : true, "JP-38" : true, "JP-39" : true, "JP-40" : true,
- "JP-41" : true, "JP-42" : true, "JP-43" : true, "JP-44" : true, "JP-45" : true,
- "JP-46" : true, "JP-47" : true, "KE-110" : true, "KE-200" : true, "KE-300" : true,
- "KE-400" : true, "KE-500" : true, "KE-700" : true, "KE-800" : true, "KG-B" : true,
- "KG-C" : true, "KG-GB" : true, "KG-J" : true, "KG-N" : true, "KG-O" : true,
- "KG-T" : true, "KG-Y" : true, "KH-1" : true, "KH-10" : true, "KH-11" : true,
- "KH-12" : true, "KH-13" : true, "KH-14" : true, "KH-15" : true, "KH-16" : true,
- "KH-17" : true, "KH-18" : true, "KH-19" : true, "KH-2" : true, "KH-20" : true,
- "KH-21" : true, "KH-22" : true, "KH-23" : true, "KH-24" : true, "KH-3" : true,
- "KH-4" : true, "KH-5" : true, "KH-6" : true, "KH-7" : true, "KH-8" : true,
- "KH-9" : true, "KI-G" : true, "KI-L" : true, "KI-P" : true, "KM-A" : true,
- "KM-G" : true, "KM-M" : true, "KN-01" : true, "KN-02" : true, "KN-03" : true,
- "KN-04" : true, "KN-05" : true, "KN-06" : true, "KN-07" : true, "KN-08" : true,
- "KN-09" : true, "KN-10" : true, "KN-11" : true, "KN-12" : true, "KN-13" : true,
- "KN-15" : true, "KN-K" : true, "KN-N" : true, "KP-01" : true, "KP-02" : true,
- "KP-03" : true, "KP-04" : true, "KP-05" : true, "KP-06" : true, "KP-07" : true,
- "KP-08" : true, "KP-09" : true, "KP-10" : true, "KP-13" : true, "KR-11" : true,
- "KR-26" : true, "KR-27" : true, "KR-28" : true, "KR-29" : true, "KR-30" : true,
- "KR-31" : true, "KR-41" : true, "KR-42" : true, "KR-43" : true, "KR-44" : true,
- "KR-45" : true, "KR-46" : true, "KR-47" : true, "KR-48" : true, "KR-49" : true,
- "KW-AH" : true, "KW-FA" : true, "KW-HA" : true, "KW-JA" : true, "KW-KU" : true,
- "KW-MU" : true, "KZ-AKM" : true, "KZ-AKT" : true, "KZ-ALA" : true, "KZ-ALM" : true,
- "KZ-AST" : true, "KZ-ATY" : true, "KZ-KAR" : true, "KZ-KUS" : true, "KZ-KZY" : true,
- "KZ-MAN" : true, "KZ-PAV" : true, "KZ-SEV" : true, "KZ-VOS" : true, "KZ-YUZ" : true,
- "KZ-ZAP" : true, "KZ-ZHA" : true, "LA-AT" : true, "LA-BK" : true, "LA-BL" : true,
- "LA-CH" : true, "LA-HO" : true, "LA-KH" : true, "LA-LM" : true, "LA-LP" : true,
- "LA-OU" : true, "LA-PH" : true, "LA-SL" : true, "LA-SV" : true, "LA-VI" : true,
- "LA-VT" : true, "LA-XA" : true, "LA-XE" : true, "LA-XI" : true, "LA-XS" : true,
- "LB-AK" : true, "LB-AS" : true, "LB-BA" : true, "LB-BH" : true, "LB-BI" : true,
- "LB-JA" : true, "LB-JL" : true, "LB-NA" : true, "LI-01" : true, "LI-02" : true,
- "LI-03" : true, "LI-04" : true, "LI-05" : true, "LI-06" : true, "LI-07" : true,
- "LI-08" : true, "LI-09" : true, "LI-10" : true, "LI-11" : true, "LK-1" : true,
- "LK-11" : true, "LK-12" : true, "LK-13" : true, "LK-2" : true, "LK-21" : true,
- "LK-22" : true, "LK-23" : true, "LK-3" : true, "LK-31" : true, "LK-32" : true,
- "LK-33" : true, "LK-4" : true, "LK-41" : true, "LK-42" : true, "LK-43" : true,
- "LK-44" : true, "LK-45" : true, "LK-5" : true, "LK-51" : true, "LK-52" : true,
- "LK-53" : true, "LK-6" : true, "LK-61" : true, "LK-62" : true, "LK-7" : true,
- "LK-71" : true, "LK-72" : true, "LK-8" : true, "LK-81" : true, "LK-82" : true,
- "LK-9" : true, "LK-91" : true, "LK-92" : true, "LR-BG" : true, "LR-BM" : true,
- "LR-CM" : true, "LR-GB" : true, "LR-GG" : true, "LR-GK" : true, "LR-LO" : true,
- "LR-MG" : true, "LR-MO" : true, "LR-MY" : true, "LR-NI" : true, "LR-RI" : true,
- "LR-SI" : true, "LS-A" : true, "LS-B" : true, "LS-C" : true, "LS-D" : true,
- "LS-E" : true, "LS-F" : true, "LS-G" : true, "LS-H" : true, "LS-J" : true,
- "LS-K" : true, "LT-AL" : true, "LT-KL" : true, "LT-KU" : true, "LT-MR" : true,
- "LT-PN" : true, "LT-SA" : true, "LT-TA" : true, "LT-TE" : true, "LT-UT" : true,
- "LT-VL" : true, "LU-D" : true, "LU-G" : true, "LU-L" : true, "LV-001" : true,
- "LV-002" : true, "LV-003" : true, "LV-004" : true, "LV-005" : true, "LV-006" : true,
- "LV-007" : true, "LV-008" : true, "LV-009" : true, "LV-010" : true, "LV-011" : true,
- "LV-012" : true, "LV-013" : true, "LV-014" : true, "LV-015" : true, "LV-016" : true,
- "LV-017" : true, "LV-018" : true, "LV-019" : true, "LV-020" : true, "LV-021" : true,
- "LV-022" : true, "LV-023" : true, "LV-024" : true, "LV-025" : true, "LV-026" : true,
- "LV-027" : true, "LV-028" : true, "LV-029" : true, "LV-030" : true, "LV-031" : true,
- "LV-032" : true, "LV-033" : true, "LV-034" : true, "LV-035" : true, "LV-036" : true,
- "LV-037" : true, "LV-038" : true, "LV-039" : true, "LV-040" : true, "LV-041" : true,
- "LV-042" : true, "LV-043" : true, "LV-044" : true, "LV-045" : true, "LV-046" : true,
- "LV-047" : true, "LV-048" : true, "LV-049" : true, "LV-050" : true, "LV-051" : true,
- "LV-052" : true, "LV-053" : true, "LV-054" : true, "LV-055" : true, "LV-056" : true,
- "LV-057" : true, "LV-058" : true, "LV-059" : true, "LV-060" : true, "LV-061" : true,
- "LV-062" : true, "LV-063" : true, "LV-064" : true, "LV-065" : true, "LV-066" : true,
- "LV-067" : true, "LV-068" : true, "LV-069" : true, "LV-070" : true, "LV-071" : true,
- "LV-072" : true, "LV-073" : true, "LV-074" : true, "LV-075" : true, "LV-076" : true,
- "LV-077" : true, "LV-078" : true, "LV-079" : true, "LV-080" : true, "LV-081" : true,
- "LV-082" : true, "LV-083" : true, "LV-084" : true, "LV-085" : true, "LV-086" : true,
- "LV-087" : true, "LV-088" : true, "LV-089" : true, "LV-090" : true, "LV-091" : true,
- "LV-092" : true, "LV-093" : true, "LV-094" : true, "LV-095" : true, "LV-096" : true,
- "LV-097" : true, "LV-098" : true, "LV-099" : true, "LV-100" : true, "LV-101" : true,
- "LV-102" : true, "LV-103" : true, "LV-104" : true, "LV-105" : true, "LV-106" : true,
- "LV-107" : true, "LV-108" : true, "LV-109" : true, "LV-110" : true, "LV-DGV" : true,
- "LV-JEL" : true, "LV-JKB" : true, "LV-JUR" : true, "LV-LPX" : true, "LV-REZ" : true,
- "LV-RIX" : true, "LV-VEN" : true, "LV-VMR" : true, "LY-BA" : true, "LY-BU" : true,
- "LY-DR" : true, "LY-GT" : true, "LY-JA" : true, "LY-JB" : true, "LY-JG" : true,
- "LY-JI" : true, "LY-JU" : true, "LY-KF" : true, "LY-MB" : true, "LY-MI" : true,
- "LY-MJ" : true, "LY-MQ" : true, "LY-NL" : true, "LY-NQ" : true, "LY-SB" : true,
- "LY-SR" : true, "LY-TB" : true, "LY-WA" : true, "LY-WD" : true, "LY-WS" : true,
- "LY-ZA" : true, "MA-01" : true, "MA-02" : true, "MA-03" : true, "MA-04" : true,
- "MA-05" : true, "MA-06" : true, "MA-07" : true, "MA-08" : true, "MA-09" : true,
- "MA-10" : true, "MA-11" : true, "MA-12" : true, "MA-13" : true, "MA-14" : true,
- "MA-15" : true, "MA-16" : true, "MA-AGD" : true, "MA-AOU" : true, "MA-ASZ" : true,
- "MA-AZI" : true, "MA-BEM" : true, "MA-BER" : true, "MA-BES" : true, "MA-BOD" : true,
- "MA-BOM" : true, "MA-CAS" : true, "MA-CHE" : true, "MA-CHI" : true, "MA-CHT" : true,
- "MA-ERR" : true, "MA-ESI" : true, "MA-ESM" : true, "MA-FAH" : true, "MA-FES" : true,
- "MA-FIG" : true, "MA-GUE" : true, "MA-HAJ" : true, "MA-HAO" : true, "MA-HOC" : true,
- "MA-IFR" : true, "MA-INE" : true, "MA-JDI" : true, "MA-JRA" : true, "MA-KEN" : true,
- "MA-KES" : true, "MA-KHE" : true, "MA-KHN" : true, "MA-KHO" : true, "MA-LAA" : true,
- "MA-LAR" : true, "MA-MED" : true, "MA-MEK" : true, "MA-MMD" : true, "MA-MMN" : true,
- "MA-MOH" : true, "MA-MOU" : true, "MA-NAD" : true, "MA-NOU" : true, "MA-OUA" : true,
- "MA-OUD" : true, "MA-OUJ" : true, "MA-RAB" : true, "MA-SAF" : true, "MA-SAL" : true,
- "MA-SEF" : true, "MA-SET" : true, "MA-SIK" : true, "MA-SKH" : true, "MA-SYB" : true,
- "MA-TAI" : true, "MA-TAO" : true, "MA-TAR" : true, "MA-TAT" : true, "MA-TAZ" : true,
- "MA-TET" : true, "MA-TIZ" : true, "MA-TNG" : true, "MA-TNT" : true, "MA-ZAG" : true,
- "MC-CL" : true, "MC-CO" : true, "MC-FO" : true, "MC-GA" : true, "MC-JE" : true,
- "MC-LA" : true, "MC-MA" : true, "MC-MC" : true, "MC-MG" : true, "MC-MO" : true,
- "MC-MU" : true, "MC-PH" : true, "MC-SD" : true, "MC-SO" : true, "MC-SP" : true,
- "MC-SR" : true, "MC-VR" : true, "MD-AN" : true, "MD-BA" : true, "MD-BD" : true,
- "MD-BR" : true, "MD-BS" : true, "MD-CA" : true, "MD-CL" : true, "MD-CM" : true,
- "MD-CR" : true, "MD-CS" : true, "MD-CT" : true, "MD-CU" : true, "MD-DO" : true,
- "MD-DR" : true, "MD-DU" : true, "MD-ED" : true, "MD-FA" : true, "MD-FL" : true,
- "MD-GA" : true, "MD-GL" : true, "MD-HI" : true, "MD-IA" : true, "MD-LE" : true,
- "MD-NI" : true, "MD-OC" : true, "MD-OR" : true, "MD-RE" : true, "MD-RI" : true,
- "MD-SD" : true, "MD-SI" : true, "MD-SN" : true, "MD-SO" : true, "MD-ST" : true,
- "MD-SV" : true, "MD-TA" : true, "MD-TE" : true, "MD-UN" : true, "ME-01" : true,
- "ME-02" : true, "ME-03" : true, "ME-04" : true, "ME-05" : true, "ME-06" : true,
- "ME-07" : true, "ME-08" : true, "ME-09" : true, "ME-10" : true, "ME-11" : true,
- "ME-12" : true, "ME-13" : true, "ME-14" : true, "ME-15" : true, "ME-16" : true,
- "ME-17" : true, "ME-18" : true, "ME-19" : true, "ME-20" : true, "ME-21" : true,
- "MG-A" : true, "MG-D" : true, "MG-F" : true, "MG-M" : true, "MG-T" : true,
- "MG-U" : true, "MH-ALK" : true, "MH-ALL" : true, "MH-ARN" : true, "MH-AUR" : true,
- "MH-EBO" : true, "MH-ENI" : true, "MH-JAB" : true, "MH-JAL" : true, "MH-KIL" : true,
- "MH-KWA" : true, "MH-L" : true, "MH-LAE" : true, "MH-LIB" : true, "MH-LIK" : true,
- "MH-MAJ" : true, "MH-MAL" : true, "MH-MEJ" : true, "MH-MIL" : true, "MH-NMK" : true,
- "MH-NMU" : true, "MH-RON" : true, "MH-T" : true, "MH-UJA" : true, "MH-UTI" : true,
- "MH-WTJ" : true, "MH-WTN" : true, "MK-01" : true, "MK-02" : true, "MK-03" : true,
- "MK-04" : true, "MK-05" : true, "MK-06" : true, "MK-07" : true, "MK-08" : true,
- "MK-09" : true, "MK-10" : true, "MK-11" : true, "MK-12" : true, "MK-13" : true,
- "MK-14" : true, "MK-15" : true, "MK-16" : true, "MK-17" : true, "MK-18" : true,
- "MK-19" : true, "MK-20" : true, "MK-21" : true, "MK-22" : true, "MK-23" : true,
- "MK-24" : true, "MK-25" : true, "MK-26" : true, "MK-27" : true, "MK-28" : true,
- "MK-29" : true, "MK-30" : true, "MK-31" : true, "MK-32" : true, "MK-33" : true,
- "MK-34" : true, "MK-35" : true, "MK-36" : true, "MK-37" : true, "MK-38" : true,
- "MK-39" : true, "MK-40" : true, "MK-41" : true, "MK-42" : true, "MK-43" : true,
- "MK-44" : true, "MK-45" : true, "MK-46" : true, "MK-47" : true, "MK-48" : true,
- "MK-49" : true, "MK-50" : true, "MK-51" : true, "MK-52" : true, "MK-53" : true,
- "MK-54" : true, "MK-55" : true, "MK-56" : true, "MK-57" : true, "MK-58" : true,
- "MK-59" : true, "MK-60" : true, "MK-61" : true, "MK-62" : true, "MK-63" : true,
- "MK-64" : true, "MK-65" : true, "MK-66" : true, "MK-67" : true, "MK-68" : true,
- "MK-69" : true, "MK-70" : true, "MK-71" : true, "MK-72" : true, "MK-73" : true,
- "MK-74" : true, "MK-75" : true, "MK-76" : true, "MK-77" : true, "MK-78" : true,
- "MK-79" : true, "MK-80" : true, "MK-81" : true, "MK-82" : true, "MK-83" : true,
- "MK-84" : true, "ML-1" : true, "ML-2" : true, "ML-3" : true, "ML-4" : true,
- "ML-5" : true, "ML-6" : true, "ML-7" : true, "ML-8" : true, "ML-BK0" : true,
- "MM-01" : true, "MM-02" : true, "MM-03" : true, "MM-04" : true, "MM-05" : true,
- "MM-06" : true, "MM-07" : true, "MM-11" : true, "MM-12" : true, "MM-13" : true,
- "MM-14" : true, "MM-15" : true, "MM-16" : true, "MM-17" : true, "MN-035" : true,
- "MN-037" : true, "MN-039" : true, "MN-041" : true, "MN-043" : true, "MN-046" : true,
- "MN-047" : true, "MN-049" : true, "MN-051" : true, "MN-053" : true, "MN-055" : true,
- "MN-057" : true, "MN-059" : true, "MN-061" : true, "MN-063" : true, "MN-064" : true,
- "MN-065" : true, "MN-067" : true, "MN-069" : true, "MN-071" : true, "MN-073" : true,
- "MN-1" : true, "MR-01" : true, "MR-02" : true, "MR-03" : true, "MR-04" : true,
- "MR-05" : true, "MR-06" : true, "MR-07" : true, "MR-08" : true, "MR-09" : true,
- "MR-10" : true, "MR-11" : true, "MR-12" : true, "MR-NKC" : true, "MT-01" : true,
- "MT-02" : true, "MT-03" : true, "MT-04" : true, "MT-05" : true, "MT-06" : true,
- "MT-07" : true, "MT-08" : true, "MT-09" : true, "MT-10" : true, "MT-11" : true,
- "MT-12" : true, "MT-13" : true, "MT-14" : true, "MT-15" : true, "MT-16" : true,
- "MT-17" : true, "MT-18" : true, "MT-19" : true, "MT-20" : true, "MT-21" : true,
- "MT-22" : true, "MT-23" : true, "MT-24" : true, "MT-25" : true, "MT-26" : true,
- "MT-27" : true, "MT-28" : true, "MT-29" : true, "MT-30" : true, "MT-31" : true,
- "MT-32" : true, "MT-33" : true, "MT-34" : true, "MT-35" : true, "MT-36" : true,
- "MT-37" : true, "MT-38" : true, "MT-39" : true, "MT-40" : true, "MT-41" : true,
- "MT-42" : true, "MT-43" : true, "MT-44" : true, "MT-45" : true, "MT-46" : true,
- "MT-47" : true, "MT-48" : true, "MT-49" : true, "MT-50" : true, "MT-51" : true,
- "MT-52" : true, "MT-53" : true, "MT-54" : true, "MT-55" : true, "MT-56" : true,
- "MT-57" : true, "MT-58" : true, "MT-59" : true, "MT-60" : true, "MT-61" : true,
- "MT-62" : true, "MT-63" : true, "MT-64" : true, "MT-65" : true, "MT-66" : true,
- "MT-67" : true, "MT-68" : true, "MU-AG" : true, "MU-BL" : true, "MU-BR" : true,
- "MU-CC" : true, "MU-CU" : true, "MU-FL" : true, "MU-GP" : true, "MU-MO" : true,
- "MU-PA" : true, "MU-PL" : true, "MU-PU" : true, "MU-PW" : true, "MU-QB" : true,
- "MU-RO" : true, "MU-RP" : true, "MU-SA" : true, "MU-VP" : true, "MV-00" : true,
- "MV-01" : true, "MV-02" : true, "MV-03" : true, "MV-04" : true, "MV-05" : true,
- "MV-07" : true, "MV-08" : true, "MV-12" : true, "MV-13" : true, "MV-14" : true,
- "MV-17" : true, "MV-20" : true, "MV-23" : true, "MV-24" : true, "MV-25" : true,
- "MV-26" : true, "MV-27" : true, "MV-28" : true, "MV-29" : true, "MV-CE" : true,
- "MV-MLE" : true, "MV-NC" : true, "MV-NO" : true, "MV-SC" : true, "MV-SU" : true,
- "MV-UN" : true, "MV-US" : true, "MW-BA" : true, "MW-BL" : true, "MW-C" : true,
- "MW-CK" : true, "MW-CR" : true, "MW-CT" : true, "MW-DE" : true, "MW-DO" : true,
- "MW-KR" : true, "MW-KS" : true, "MW-LI" : true, "MW-LK" : true, "MW-MC" : true,
- "MW-MG" : true, "MW-MH" : true, "MW-MU" : true, "MW-MW" : true, "MW-MZ" : true,
- "MW-N" : true, "MW-NB" : true, "MW-NE" : true, "MW-NI" : true, "MW-NK" : true,
- "MW-NS" : true, "MW-NU" : true, "MW-PH" : true, "MW-RU" : true, "MW-S" : true,
- "MW-SA" : true, "MW-TH" : true, "MW-ZO" : true, "MX-AGU" : true, "MX-BCN" : true,
- "MX-BCS" : true, "MX-CAM" : true, "MX-CHH" : true, "MX-CHP" : true, "MX-COA" : true,
- "MX-COL" : true, "MX-DIF" : true, "MX-DUR" : true, "MX-GRO" : true, "MX-GUA" : true,
- "MX-HID" : true, "MX-JAL" : true, "MX-MEX" : true, "MX-MIC" : true, "MX-MOR" : true,
- "MX-NAY" : true, "MX-NLE" : true, "MX-OAX" : true, "MX-PUE" : true, "MX-QUE" : true,
- "MX-ROO" : true, "MX-SIN" : true, "MX-SLP" : true, "MX-SON" : true, "MX-TAB" : true,
- "MX-TAM" : true, "MX-TLA" : true, "MX-VER" : true, "MX-YUC" : true, "MX-ZAC" : true,
- "MY-01" : true, "MY-02" : true, "MY-03" : true, "MY-04" : true, "MY-05" : true,
- "MY-06" : true, "MY-07" : true, "MY-08" : true, "MY-09" : true, "MY-10" : true,
- "MY-11" : true, "MY-12" : true, "MY-13" : true, "MY-14" : true, "MY-15" : true,
- "MY-16" : true, "MZ-A" : true, "MZ-B" : true, "MZ-G" : true, "MZ-I" : true,
- "MZ-L" : true, "MZ-MPM" : true, "MZ-N" : true, "MZ-P" : true, "MZ-Q" : true,
- "MZ-S" : true, "MZ-T" : true, "NA-CA" : true, "NA-ER" : true, "NA-HA" : true,
- "NA-KA" : true, "NA-KH" : true, "NA-KU" : true, "NA-OD" : true, "NA-OH" : true,
- "NA-OK" : true, "NA-ON" : true, "NA-OS" : true, "NA-OT" : true, "NA-OW" : true,
- "NE-1" : true, "NE-2" : true, "NE-3" : true, "NE-4" : true, "NE-5" : true,
- "NE-6" : true, "NE-7" : true, "NE-8" : true, "NG-AB" : true, "NG-AD" : true,
- "NG-AK" : true, "NG-AN" : true, "NG-BA" : true, "NG-BE" : true, "NG-BO" : true,
- "NG-BY" : true, "NG-CR" : true, "NG-DE" : true, "NG-EB" : true, "NG-ED" : true,
- "NG-EK" : true, "NG-EN" : true, "NG-FC" : true, "NG-GO" : true, "NG-IM" : true,
- "NG-JI" : true, "NG-KD" : true, "NG-KE" : true, "NG-KN" : true, "NG-KO" : true,
- "NG-KT" : true, "NG-KW" : true, "NG-LA" : true, "NG-NA" : true, "NG-NI" : true,
- "NG-OG" : true, "NG-ON" : true, "NG-OS" : true, "NG-OY" : true, "NG-PL" : true,
- "NG-RI" : true, "NG-SO" : true, "NG-TA" : true, "NG-YO" : true, "NG-ZA" : true,
- "NI-AN" : true, "NI-AS" : true, "NI-BO" : true, "NI-CA" : true, "NI-CI" : true,
- "NI-CO" : true, "NI-ES" : true, "NI-GR" : true, "NI-JI" : true, "NI-LE" : true,
- "NI-MD" : true, "NI-MN" : true, "NI-MS" : true, "NI-MT" : true, "NI-NS" : true,
- "NI-RI" : true, "NI-SJ" : true, "NL-AW" : true, "NL-BQ1" : true, "NL-BQ2" : true,
- "NL-BQ3" : true, "NL-CW" : true, "NL-DR" : true, "NL-FL" : true, "NL-FR" : true,
- "NL-GE" : true, "NL-GR" : true, "NL-LI" : true, "NL-NB" : true, "NL-NH" : true,
- "NL-OV" : true, "NL-SX" : true, "NL-UT" : true, "NL-ZE" : true, "NL-ZH" : true,
- "NO-01" : true, "NO-02" : true, "NO-03" : true, "NO-04" : true, "NO-05" : true,
- "NO-06" : true, "NO-07" : true, "NO-08" : true, "NO-09" : true, "NO-10" : true,
- "NO-11" : true, "NO-12" : true, "NO-14" : true, "NO-15" : true, "NO-16" : true,
- "NO-17" : true, "NO-18" : true, "NO-19" : true, "NO-20" : true, "NO-21" : true,
- "NO-22" : true, "NP-1" : true, "NP-2" : true, "NP-3" : true, "NP-4" : true,
- "NP-5" : true, "NP-BA" : true, "NP-BH" : true, "NP-DH" : true, "NP-GA" : true,
- "NP-JA" : true, "NP-KA" : true, "NP-KO" : true, "NP-LU" : true, "NP-MA" : true,
- "NP-ME" : true, "NP-NA" : true, "NP-RA" : true, "NP-SA" : true, "NP-SE" : true,
- "NR-01" : true, "NR-02" : true, "NR-03" : true, "NR-04" : true, "NR-05" : true,
- "NR-06" : true, "NR-07" : true, "NR-08" : true, "NR-09" : true, "NR-10" : true,
- "NR-11" : true, "NR-12" : true, "NR-13" : true, "NR-14" : true, "NZ-AUK" : true,
- "NZ-BOP" : true, "NZ-CAN" : true, "NZ-CIT" : true, "NZ-GIS" : true, "NZ-HKB" : true,
- "NZ-MBH" : true, "NZ-MWT" : true, "NZ-N" : true, "NZ-NSN" : true, "NZ-NTL" : true,
- "NZ-OTA" : true, "NZ-S" : true, "NZ-STL" : true, "NZ-TAS" : true, "NZ-TKI" : true,
- "NZ-WGN" : true, "NZ-WKO" : true, "NZ-WTC" : true, "OM-BA" : true, "OM-BU" : true,
- "OM-DA" : true, "OM-MA" : true, "OM-MU" : true, "OM-SH" : true, "OM-WU" : true,
- "OM-ZA" : true, "OM-ZU" : true, "PA-1" : true, "PA-2" : true, "PA-3" : true,
- "PA-4" : true, "PA-5" : true, "PA-6" : true, "PA-7" : true, "PA-8" : true,
- "PA-9" : true, "PA-EM" : true, "PA-KY" : true, "PA-NB" : true, "PE-AMA" : true,
- "PE-ANC" : true, "PE-APU" : true, "PE-ARE" : true, "PE-AYA" : true, "PE-CAJ" : true,
- "PE-CAL" : true, "PE-CUS" : true, "PE-HUC" : true, "PE-HUV" : true, "PE-ICA" : true,
- "PE-JUN" : true, "PE-LAL" : true, "PE-LAM" : true, "PE-LIM" : true, "PE-LMA" : true,
- "PE-LOR" : true, "PE-MDD" : true, "PE-MOQ" : true, "PE-PAS" : true, "PE-PIU" : true,
- "PE-PUN" : true, "PE-SAM" : true, "PE-TAC" : true, "PE-TUM" : true, "PE-UCA" : true,
- "PG-CPK" : true, "PG-CPM" : true, "PG-EBR" : true, "PG-EHG" : true, "PG-EPW" : true,
- "PG-ESW" : true, "PG-GPK" : true, "PG-MBA" : true, "PG-MPL" : true, "PG-MPM" : true,
- "PG-MRL" : true, "PG-NCD" : true, "PG-NIK" : true, "PG-NPP" : true, "PG-NSB" : true,
- "PG-SAN" : true, "PG-SHM" : true, "PG-WBK" : true, "PG-WHM" : true, "PG-WPD" : true,
- "PH-00" : true, "PH-01" : true, "PH-02" : true, "PH-03" : true, "PH-05" : true,
- "PH-06" : true, "PH-07" : true, "PH-08" : true, "PH-09" : true, "PH-10" : true,
- "PH-11" : true, "PH-12" : true, "PH-13" : true, "PH-14" : true, "PH-15" : true,
- "PH-40" : true, "PH-41" : true, "PH-ABR" : true, "PH-AGN" : true, "PH-AGS" : true,
- "PH-AKL" : true, "PH-ALB" : true, "PH-ANT" : true, "PH-APA" : true, "PH-AUR" : true,
- "PH-BAN" : true, "PH-BAS" : true, "PH-BEN" : true, "PH-BIL" : true, "PH-BOH" : true,
- "PH-BTG" : true, "PH-BTN" : true, "PH-BUK" : true, "PH-BUL" : true, "PH-CAG" : true,
- "PH-CAM" : true, "PH-CAN" : true, "PH-CAP" : true, "PH-CAS" : true, "PH-CAT" : true,
- "PH-CAV" : true, "PH-CEB" : true, "PH-COM" : true, "PH-DAO" : true, "PH-DAS" : true,
- "PH-DAV" : true, "PH-DIN" : true, "PH-EAS" : true, "PH-GUI" : true, "PH-IFU" : true,
- "PH-ILI" : true, "PH-ILN" : true, "PH-ILS" : true, "PH-ISA" : true, "PH-KAL" : true,
- "PH-LAG" : true, "PH-LAN" : true, "PH-LAS" : true, "PH-LEY" : true, "PH-LUN" : true,
- "PH-MAD" : true, "PH-MAG" : true, "PH-MAS" : true, "PH-MDC" : true, "PH-MDR" : true,
- "PH-MOU" : true, "PH-MSC" : true, "PH-MSR" : true, "PH-NCO" : true, "PH-NEC" : true,
- "PH-NER" : true, "PH-NSA" : true, "PH-NUE" : true, "PH-NUV" : true, "PH-PAM" : true,
- "PH-PAN" : true, "PH-PLW" : true, "PH-QUE" : true, "PH-QUI" : true, "PH-RIZ" : true,
- "PH-ROM" : true, "PH-SAR" : true, "PH-SCO" : true, "PH-SIG" : true, "PH-SLE" : true,
- "PH-SLU" : true, "PH-SOR" : true, "PH-SUK" : true, "PH-SUN" : true, "PH-SUR" : true,
- "PH-TAR" : true, "PH-TAW" : true, "PH-WSA" : true, "PH-ZAN" : true, "PH-ZAS" : true,
- "PH-ZMB" : true, "PH-ZSI" : true, "PK-BA" : true, "PK-GB" : true, "PK-IS" : true,
- "PK-JK" : true, "PK-KP" : true, "PK-PB" : true, "PK-SD" : true, "PK-TA" : true,
- "PL-DS" : true, "PL-KP" : true, "PL-LB" : true, "PL-LD" : true, "PL-LU" : true,
- "PL-MA" : true, "PL-MZ" : true, "PL-OP" : true, "PL-PD" : true, "PL-PK" : true,
- "PL-PM" : true, "PL-SK" : true, "PL-SL" : true, "PL-WN" : true, "PL-WP" : true,
- "PL-ZP" : true, "PS-BTH" : true, "PS-DEB" : true, "PS-GZA" : true, "PS-HBN" : true,
- "PS-JEM" : true, "PS-JEN" : true, "PS-JRH" : true, "PS-KYS" : true, "PS-NBS" : true,
- "PS-NGZ" : true, "PS-QQA" : true, "PS-RBH" : true, "PS-RFH" : true, "PS-SLT" : true,
- "PS-TBS" : true, "PS-TKM" : true, "PT-01" : true, "PT-02" : true, "PT-03" : true,
- "PT-04" : true, "PT-05" : true, "PT-06" : true, "PT-07" : true, "PT-08" : true,
- "PT-09" : true, "PT-10" : true, "PT-11" : true, "PT-12" : true, "PT-13" : true,
- "PT-14" : true, "PT-15" : true, "PT-16" : true, "PT-17" : true, "PT-18" : true,
- "PT-20" : true, "PT-30" : true, "PW-002" : true, "PW-004" : true, "PW-010" : true,
- "PW-050" : true, "PW-100" : true, "PW-150" : true, "PW-212" : true, "PW-214" : true,
- "PW-218" : true, "PW-222" : true, "PW-224" : true, "PW-226" : true, "PW-227" : true,
- "PW-228" : true, "PW-350" : true, "PW-370" : true, "PY-1" : true, "PY-10" : true,
- "PY-11" : true, "PY-12" : true, "PY-13" : true, "PY-14" : true, "PY-15" : true,
- "PY-16" : true, "PY-19" : true, "PY-2" : true, "PY-3" : true, "PY-4" : true,
- "PY-5" : true, "PY-6" : true, "PY-7" : true, "PY-8" : true, "PY-9" : true,
- "PY-ASU" : true, "QA-DA" : true, "QA-KH" : true, "QA-MS" : true, "QA-RA" : true,
- "QA-US" : true, "QA-WA" : true, "QA-ZA" : true, "RO-AB" : true, "RO-AG" : true,
- "RO-AR" : true, "RO-B" : true, "RO-BC" : true, "RO-BH" : true, "RO-BN" : true,
- "RO-BR" : true, "RO-BT" : true, "RO-BV" : true, "RO-BZ" : true, "RO-CJ" : true,
- "RO-CL" : true, "RO-CS" : true, "RO-CT" : true, "RO-CV" : true, "RO-DB" : true,
- "RO-DJ" : true, "RO-GJ" : true, "RO-GL" : true, "RO-GR" : true, "RO-HD" : true,
- "RO-HR" : true, "RO-IF" : true, "RO-IL" : true, "RO-IS" : true, "RO-MH" : true,
- "RO-MM" : true, "RO-MS" : true, "RO-NT" : true, "RO-OT" : true, "RO-PH" : true,
- "RO-SB" : true, "RO-SJ" : true, "RO-SM" : true, "RO-SV" : true, "RO-TL" : true,
- "RO-TM" : true, "RO-TR" : true, "RO-VL" : true, "RO-VN" : true, "RO-VS" : true,
- "RS-00" : true, "RS-01" : true, "RS-02" : true, "RS-03" : true, "RS-04" : true,
- "RS-05" : true, "RS-06" : true, "RS-07" : true, "RS-08" : true, "RS-09" : true,
- "RS-10" : true, "RS-11" : true, "RS-12" : true, "RS-13" : true, "RS-14" : true,
- "RS-15" : true, "RS-16" : true, "RS-17" : true, "RS-18" : true, "RS-19" : true,
- "RS-20" : true, "RS-21" : true, "RS-22" : true, "RS-23" : true, "RS-24" : true,
- "RS-25" : true, "RS-26" : true, "RS-27" : true, "RS-28" : true, "RS-29" : true,
- "RS-KM" : true, "RS-VO" : true, "RU-AD" : true, "RU-AL" : true, "RU-ALT" : true,
- "RU-AMU" : true, "RU-ARK" : true, "RU-AST" : true, "RU-BA" : true, "RU-BEL" : true,
- "RU-BRY" : true, "RU-BU" : true, "RU-CE" : true, "RU-CHE" : true, "RU-CHU" : true,
- "RU-CU" : true, "RU-DA" : true, "RU-IN" : true, "RU-IRK" : true, "RU-IVA" : true,
- "RU-KAM" : true, "RU-KB" : true, "RU-KC" : true, "RU-KDA" : true, "RU-KEM" : true,
- "RU-KGD" : true, "RU-KGN" : true, "RU-KHA" : true, "RU-KHM" : true, "RU-KIR" : true,
- "RU-KK" : true, "RU-KL" : true, "RU-KLU" : true, "RU-KO" : true, "RU-KOS" : true,
- "RU-KR" : true, "RU-KRS" : true, "RU-KYA" : true, "RU-LEN" : true, "RU-LIP" : true,
- "RU-MAG" : true, "RU-ME" : true, "RU-MO" : true, "RU-MOS" : true, "RU-MOW" : true,
- "RU-MUR" : true, "RU-NEN" : true, "RU-NGR" : true, "RU-NIZ" : true, "RU-NVS" : true,
- "RU-OMS" : true, "RU-ORE" : true, "RU-ORL" : true, "RU-PER" : true, "RU-PNZ" : true,
- "RU-PRI" : true, "RU-PSK" : true, "RU-ROS" : true, "RU-RYA" : true, "RU-SA" : true,
- "RU-SAK" : true, "RU-SAM" : true, "RU-SAR" : true, "RU-SE" : true, "RU-SMO" : true,
- "RU-SPE" : true, "RU-STA" : true, "RU-SVE" : true, "RU-TA" : true, "RU-TAM" : true,
- "RU-TOM" : true, "RU-TUL" : true, "RU-TVE" : true, "RU-TY" : true, "RU-TYU" : true,
- "RU-UD" : true, "RU-ULY" : true, "RU-VGG" : true, "RU-VLA" : true, "RU-VLG" : true,
- "RU-VOR" : true, "RU-YAN" : true, "RU-YAR" : true, "RU-YEV" : true, "RU-ZAB" : true,
- "RW-01" : true, "RW-02" : true, "RW-03" : true, "RW-04" : true, "RW-05" : true,
- "SA-01" : true, "SA-02" : true, "SA-03" : true, "SA-04" : true, "SA-05" : true,
- "SA-06" : true, "SA-07" : true, "SA-08" : true, "SA-09" : true, "SA-10" : true,
- "SA-11" : true, "SA-12" : true, "SA-14" : true, "SB-CE" : true, "SB-CH" : true,
- "SB-CT" : true, "SB-GU" : true, "SB-IS" : true, "SB-MK" : true, "SB-ML" : true,
- "SB-RB" : true, "SB-TE" : true, "SB-WE" : true, "SC-01" : true, "SC-02" : true,
- "SC-03" : true, "SC-04" : true, "SC-05" : true, "SC-06" : true, "SC-07" : true,
- "SC-08" : true, "SC-09" : true, "SC-10" : true, "SC-11" : true, "SC-12" : true,
- "SC-13" : true, "SC-14" : true, "SC-15" : true, "SC-16" : true, "SC-17" : true,
- "SC-18" : true, "SC-19" : true, "SC-20" : true, "SC-21" : true, "SC-22" : true,
- "SC-23" : true, "SC-24" : true, "SC-25" : true, "SD-DC" : true, "SD-DE" : true,
- "SD-DN" : true, "SD-DS" : true, "SD-DW" : true, "SD-GD" : true, "SD-GZ" : true,
- "SD-KA" : true, "SD-KH" : true, "SD-KN" : true, "SD-KS" : true, "SD-NB" : true,
- "SD-NO" : true, "SD-NR" : true, "SD-NW" : true, "SD-RS" : true, "SD-SI" : true,
- "SE-AB" : true, "SE-AC" : true, "SE-BD" : true, "SE-C" : true, "SE-D" : true,
- "SE-E" : true, "SE-F" : true, "SE-G" : true, "SE-H" : true, "SE-I" : true,
- "SE-K" : true, "SE-M" : true, "SE-N" : true, "SE-O" : true, "SE-S" : true,
- "SE-T" : true, "SE-U" : true, "SE-W" : true, "SE-X" : true, "SE-Y" : true,
- "SE-Z" : true, "SG-01" : true, "SG-02" : true, "SG-03" : true, "SG-04" : true,
- "SG-05" : true, "SH-AC" : true, "SH-HL" : true, "SH-TA" : true, "SI-001" : true,
- "SI-002" : true, "SI-003" : true, "SI-004" : true, "SI-005" : true, "SI-006" : true,
- "SI-007" : true, "SI-008" : true, "SI-009" : true, "SI-010" : true, "SI-011" : true,
- "SI-012" : true, "SI-013" : true, "SI-014" : true, "SI-015" : true, "SI-016" : true,
- "SI-017" : true, "SI-018" : true, "SI-019" : true, "SI-020" : true, "SI-021" : true,
- "SI-022" : true, "SI-023" : true, "SI-024" : true, "SI-025" : true, "SI-026" : true,
- "SI-027" : true, "SI-028" : true, "SI-029" : true, "SI-030" : true, "SI-031" : true,
- "SI-032" : true, "SI-033" : true, "SI-034" : true, "SI-035" : true, "SI-036" : true,
- "SI-037" : true, "SI-038" : true, "SI-039" : true, "SI-040" : true, "SI-041" : true,
- "SI-042" : true, "SI-043" : true, "SI-044" : true, "SI-045" : true, "SI-046" : true,
- "SI-047" : true, "SI-048" : true, "SI-049" : true, "SI-050" : true, "SI-051" : true,
- "SI-052" : true, "SI-053" : true, "SI-054" : true, "SI-055" : true, "SI-056" : true,
- "SI-057" : true, "SI-058" : true, "SI-059" : true, "SI-060" : true, "SI-061" : true,
- "SI-062" : true, "SI-063" : true, "SI-064" : true, "SI-065" : true, "SI-066" : true,
- "SI-067" : true, "SI-068" : true, "SI-069" : true, "SI-070" : true, "SI-071" : true,
- "SI-072" : true, "SI-073" : true, "SI-074" : true, "SI-075" : true, "SI-076" : true,
- "SI-077" : true, "SI-078" : true, "SI-079" : true, "SI-080" : true, "SI-081" : true,
- "SI-082" : true, "SI-083" : true, "SI-084" : true, "SI-085" : true, "SI-086" : true,
- "SI-087" : true, "SI-088" : true, "SI-089" : true, "SI-090" : true, "SI-091" : true,
- "SI-092" : true, "SI-093" : true, "SI-094" : true, "SI-095" : true, "SI-096" : true,
- "SI-097" : true, "SI-098" : true, "SI-099" : true, "SI-100" : true, "SI-101" : true,
- "SI-102" : true, "SI-103" : true, "SI-104" : true, "SI-105" : true, "SI-106" : true,
- "SI-107" : true, "SI-108" : true, "SI-109" : true, "SI-110" : true, "SI-111" : true,
- "SI-112" : true, "SI-113" : true, "SI-114" : true, "SI-115" : true, "SI-116" : true,
- "SI-117" : true, "SI-118" : true, "SI-119" : true, "SI-120" : true, "SI-121" : true,
- "SI-122" : true, "SI-123" : true, "SI-124" : true, "SI-125" : true, "SI-126" : true,
- "SI-127" : true, "SI-128" : true, "SI-129" : true, "SI-130" : true, "SI-131" : true,
- "SI-132" : true, "SI-133" : true, "SI-134" : true, "SI-135" : true, "SI-136" : true,
- "SI-137" : true, "SI-138" : true, "SI-139" : true, "SI-140" : true, "SI-141" : true,
- "SI-142" : true, "SI-143" : true, "SI-144" : true, "SI-146" : true, "SI-147" : true,
- "SI-148" : true, "SI-149" : true, "SI-150" : true, "SI-151" : true, "SI-152" : true,
- "SI-153" : true, "SI-154" : true, "SI-155" : true, "SI-156" : true, "SI-157" : true,
- "SI-158" : true, "SI-159" : true, "SI-160" : true, "SI-161" : true, "SI-162" : true,
- "SI-163" : true, "SI-164" : true, "SI-165" : true, "SI-166" : true, "SI-167" : true,
- "SI-168" : true, "SI-169" : true, "SI-170" : true, "SI-171" : true, "SI-172" : true,
- "SI-173" : true, "SI-174" : true, "SI-175" : true, "SI-176" : true, "SI-177" : true,
- "SI-178" : true, "SI-179" : true, "SI-180" : true, "SI-181" : true, "SI-182" : true,
- "SI-183" : true, "SI-184" : true, "SI-185" : true, "SI-186" : true, "SI-187" : true,
- "SI-188" : true, "SI-189" : true, "SI-190" : true, "SI-191" : true, "SI-192" : true,
- "SI-193" : true, "SI-194" : true, "SI-195" : true, "SI-196" : true, "SI-197" : true,
- "SI-198" : true, "SI-199" : true, "SI-200" : true, "SI-201" : true, "SI-202" : true,
- "SI-203" : true, "SI-204" : true, "SI-205" : true, "SI-206" : true, "SI-207" : true,
- "SI-208" : true, "SI-209" : true, "SI-210" : true, "SI-211" : true, "SK-BC" : true,
- "SK-BL" : true, "SK-KI" : true, "SK-NI" : true, "SK-PV" : true, "SK-TA" : true,
- "SK-TC" : true, "SK-ZI" : true, "SL-E" : true, "SL-N" : true, "SL-S" : true,
- "SL-W" : true, "SM-01" : true, "SM-02" : true, "SM-03" : true, "SM-04" : true,
- "SM-05" : true, "SM-06" : true, "SM-07" : true, "SM-08" : true, "SM-09" : true,
- "SN-DB" : true, "SN-DK" : true, "SN-FK" : true, "SN-KA" : true, "SN-KD" : true,
- "SN-KE" : true, "SN-KL" : true, "SN-LG" : true, "SN-MT" : true, "SN-SE" : true,
- "SN-SL" : true, "SN-TC" : true, "SN-TH" : true, "SN-ZG" : true, "SO-AW" : true,
- "SO-BK" : true, "SO-BN" : true, "SO-BR" : true, "SO-BY" : true, "SO-GA" : true,
- "SO-GE" : true, "SO-HI" : true, "SO-JD" : true, "SO-JH" : true, "SO-MU" : true,
- "SO-NU" : true, "SO-SA" : true, "SO-SD" : true, "SO-SH" : true, "SO-SO" : true,
- "SO-TO" : true, "SO-WO" : true, "SR-BR" : true, "SR-CM" : true, "SR-CR" : true,
- "SR-MA" : true, "SR-NI" : true, "SR-PM" : true, "SR-PR" : true, "SR-SA" : true,
- "SR-SI" : true, "SR-WA" : true, "SS-BN" : true, "SS-BW" : true, "SS-EC" : true,
- "SS-EE8" : true, "SS-EW" : true, "SS-JG" : true, "SS-LK" : true, "SS-NU" : true,
- "SS-UY" : true, "SS-WR" : true, "ST-P" : true, "ST-S" : true, "SV-AH" : true,
- "SV-CA" : true, "SV-CH" : true, "SV-CU" : true, "SV-LI" : true, "SV-MO" : true,
- "SV-PA" : true, "SV-SA" : true, "SV-SM" : true, "SV-SO" : true, "SV-SS" : true,
- "SV-SV" : true, "SV-UN" : true, "SV-US" : true, "SY-DI" : true, "SY-DR" : true,
- "SY-DY" : true, "SY-HA" : true, "SY-HI" : true, "SY-HL" : true, "SY-HM" : true,
- "SY-ID" : true, "SY-LA" : true, "SY-QU" : true, "SY-RA" : true, "SY-RD" : true,
- "SY-SU" : true, "SY-TA" : true, "SZ-HH" : true, "SZ-LU" : true, "SZ-MA" : true,
- "SZ-SH" : true, "TD-BA" : true, "TD-BG" : true, "TD-BO" : true, "TD-CB" : true,
- "TD-EN" : true, "TD-GR" : true, "TD-HL" : true, "TD-KA" : true, "TD-LC" : true,
- "TD-LO" : true, "TD-LR" : true, "TD-MA" : true, "TD-MC" : true, "TD-ME" : true,
- "TD-MO" : true, "TD-ND" : true, "TD-OD" : true, "TD-SA" : true, "TD-SI" : true,
- "TD-TA" : true, "TD-TI" : true, "TD-WF" : true, "TG-C" : true, "TG-K" : true,
- "TG-M" : true, "TG-P" : true, "TG-S" : true, "TH-10" : true, "TH-11" : true,
- "TH-12" : true, "TH-13" : true, "TH-14" : true, "TH-15" : true, "TH-16" : true,
- "TH-17" : true, "TH-18" : true, "TH-19" : true, "TH-20" : true, "TH-21" : true,
- "TH-22" : true, "TH-23" : true, "TH-24" : true, "TH-25" : true, "TH-26" : true,
- "TH-27" : true, "TH-30" : true, "TH-31" : true, "TH-32" : true, "TH-33" : true,
- "TH-34" : true, "TH-35" : true, "TH-36" : true, "TH-37" : true, "TH-39" : true,
- "TH-40" : true, "TH-41" : true, "TH-42" : true, "TH-43" : true, "TH-44" : true,
- "TH-45" : true, "TH-46" : true, "TH-47" : true, "TH-48" : true, "TH-49" : true,
- "TH-50" : true, "TH-51" : true, "TH-52" : true, "TH-53" : true, "TH-54" : true,
- "TH-55" : true, "TH-56" : true, "TH-57" : true, "TH-58" : true, "TH-60" : true,
- "TH-61" : true, "TH-62" : true, "TH-63" : true, "TH-64" : true, "TH-65" : true,
- "TH-66" : true, "TH-67" : true, "TH-70" : true, "TH-71" : true, "TH-72" : true,
- "TH-73" : true, "TH-74" : true, "TH-75" : true, "TH-76" : true, "TH-77" : true,
- "TH-80" : true, "TH-81" : true, "TH-82" : true, "TH-83" : true, "TH-84" : true,
- "TH-85" : true, "TH-86" : true, "TH-90" : true, "TH-91" : true, "TH-92" : true,
- "TH-93" : true, "TH-94" : true, "TH-95" : true, "TH-96" : true, "TH-S" : true,
- "TJ-GB" : true, "TJ-KT" : true, "TJ-SU" : true, "TL-AL" : true, "TL-AN" : true,
- "TL-BA" : true, "TL-BO" : true, "TL-CO" : true, "TL-DI" : true, "TL-ER" : true,
- "TL-LA" : true, "TL-LI" : true, "TL-MF" : true, "TL-MT" : true, "TL-OE" : true,
- "TL-VI" : true, "TM-A" : true, "TM-B" : true, "TM-D" : true, "TM-L" : true,
- "TM-M" : true, "TM-S" : true, "TN-11" : true, "TN-12" : true, "TN-13" : true,
- "TN-14" : true, "TN-21" : true, "TN-22" : true, "TN-23" : true, "TN-31" : true,
- "TN-32" : true, "TN-33" : true, "TN-34" : true, "TN-41" : true, "TN-42" : true,
- "TN-43" : true, "TN-51" : true, "TN-52" : true, "TN-53" : true, "TN-61" : true,
- "TN-71" : true, "TN-72" : true, "TN-73" : true, "TN-81" : true, "TN-82" : true,
- "TN-83" : true, "TO-01" : true, "TO-02" : true, "TO-03" : true, "TO-04" : true,
- "TO-05" : true, "TR-01" : true, "TR-02" : true, "TR-03" : true, "TR-04" : true,
- "TR-05" : true, "TR-06" : true, "TR-07" : true, "TR-08" : true, "TR-09" : true,
- "TR-10" : true, "TR-11" : true, "TR-12" : true, "TR-13" : true, "TR-14" : true,
- "TR-15" : true, "TR-16" : true, "TR-17" : true, "TR-18" : true, "TR-19" : true,
- "TR-20" : true, "TR-21" : true, "TR-22" : true, "TR-23" : true, "TR-24" : true,
- "TR-25" : true, "TR-26" : true, "TR-27" : true, "TR-28" : true, "TR-29" : true,
- "TR-30" : true, "TR-31" : true, "TR-32" : true, "TR-33" : true, "TR-34" : true,
- "TR-35" : true, "TR-36" : true, "TR-37" : true, "TR-38" : true, "TR-39" : true,
- "TR-40" : true, "TR-41" : true, "TR-42" : true, "TR-43" : true, "TR-44" : true,
- "TR-45" : true, "TR-46" : true, "TR-47" : true, "TR-48" : true, "TR-49" : true,
- "TR-50" : true, "TR-51" : true, "TR-52" : true, "TR-53" : true, "TR-54" : true,
- "TR-55" : true, "TR-56" : true, "TR-57" : true, "TR-58" : true, "TR-59" : true,
- "TR-60" : true, "TR-61" : true, "TR-62" : true, "TR-63" : true, "TR-64" : true,
- "TR-65" : true, "TR-66" : true, "TR-67" : true, "TR-68" : true, "TR-69" : true,
- "TR-70" : true, "TR-71" : true, "TR-72" : true, "TR-73" : true, "TR-74" : true,
- "TR-75" : true, "TR-76" : true, "TR-77" : true, "TR-78" : true, "TR-79" : true,
- "TR-80" : true, "TR-81" : true, "TT-ARI" : true, "TT-CHA" : true, "TT-CTT" : true,
- "TT-DMN" : true, "TT-ETO" : true, "TT-PED" : true, "TT-POS" : true, "TT-PRT" : true,
- "TT-PTF" : true, "TT-RCM" : true, "TT-SFO" : true, "TT-SGE" : true, "TT-SIP" : true,
- "TT-SJL" : true, "TT-TUP" : true, "TT-WTO" : true, "TV-FUN" : true, "TV-NIT" : true,
- "TV-NKF" : true, "TV-NKL" : true, "TV-NMA" : true, "TV-NMG" : true, "TV-NUI" : true,
- "TV-VAI" : true, "TW-CHA" : true, "TW-CYI" : true, "TW-CYQ" : true, "TW-HSQ" : true,
- "TW-HSZ" : true, "TW-HUA" : true, "TW-ILA" : true, "TW-KEE" : true, "TW-KHH" : true,
- "TW-KHQ" : true, "TW-MIA" : true, "TW-NAN" : true, "TW-PEN" : true, "TW-PIF" : true,
- "TW-TAO" : true, "TW-TNN" : true, "TW-TNQ" : true, "TW-TPE" : true, "TW-TPQ" : true,
- "TW-TTT" : true, "TW-TXG" : true, "TW-TXQ" : true, "TW-YUN" : true, "TZ-01" : true,
- "TZ-02" : true, "TZ-03" : true, "TZ-04" : true, "TZ-05" : true, "TZ-06" : true,
- "TZ-07" : true, "TZ-08" : true, "TZ-09" : true, "TZ-10" : true, "TZ-11" : true,
- "TZ-12" : true, "TZ-13" : true, "TZ-14" : true, "TZ-15" : true, "TZ-16" : true,
- "TZ-17" : true, "TZ-18" : true, "TZ-19" : true, "TZ-20" : true, "TZ-21" : true,
- "TZ-22" : true, "TZ-23" : true, "TZ-24" : true, "TZ-25" : true, "TZ-26" : true,
- "UA-05" : true, "UA-07" : true, "UA-09" : true, "UA-12" : true, "UA-14" : true,
- "UA-18" : true, "UA-21" : true, "UA-23" : true, "UA-26" : true, "UA-30" : true,
- "UA-32" : true, "UA-35" : true, "UA-40" : true, "UA-43" : true, "UA-46" : true,
- "UA-48" : true, "UA-51" : true, "UA-53" : true, "UA-56" : true, "UA-59" : true,
- "UA-61" : true, "UA-63" : true, "UA-65" : true, "UA-68" : true, "UA-71" : true,
- "UA-74" : true, "UA-77" : true, "UG-101" : true, "UG-102" : true, "UG-103" : true,
- "UG-104" : true, "UG-105" : true, "UG-106" : true, "UG-107" : true, "UG-108" : true,
- "UG-109" : true, "UG-110" : true, "UG-111" : true, "UG-112" : true, "UG-113" : true,
- "UG-114" : true, "UG-115" : true, "UG-116" : true, "UG-201" : true, "UG-202" : true,
- "UG-203" : true, "UG-204" : true, "UG-205" : true, "UG-206" : true, "UG-207" : true,
- "UG-208" : true, "UG-209" : true, "UG-210" : true, "UG-211" : true, "UG-212" : true,
- "UG-213" : true, "UG-214" : true, "UG-215" : true, "UG-216" : true, "UG-217" : true,
- "UG-218" : true, "UG-219" : true, "UG-220" : true, "UG-221" : true, "UG-222" : true,
- "UG-223" : true, "UG-224" : true, "UG-301" : true, "UG-302" : true, "UG-303" : true,
- "UG-304" : true, "UG-305" : true, "UG-306" : true, "UG-307" : true, "UG-308" : true,
- "UG-309" : true, "UG-310" : true, "UG-311" : true, "UG-312" : true, "UG-313" : true,
- "UG-314" : true, "UG-315" : true, "UG-316" : true, "UG-317" : true, "UG-318" : true,
- "UG-319" : true, "UG-320" : true, "UG-321" : true, "UG-401" : true, "UG-402" : true,
- "UG-403" : true, "UG-404" : true, "UG-405" : true, "UG-406" : true, "UG-407" : true,
- "UG-408" : true, "UG-409" : true, "UG-410" : true, "UG-411" : true, "UG-412" : true,
- "UG-413" : true, "UG-414" : true, "UG-415" : true, "UG-416" : true, "UG-417" : true,
- "UG-418" : true, "UG-419" : true, "UG-C" : true, "UG-E" : true, "UG-N" : true,
- "UG-W" : true, "UM-67" : true, "UM-71" : true, "UM-76" : true, "UM-79" : true,
- "UM-81" : true, "UM-84" : true, "UM-86" : true, "UM-89" : true, "UM-95" : true,
- "US-AK" : true, "US-AL" : true, "US-AR" : true, "US-AS" : true, "US-AZ" : true,
- "US-CA" : true, "US-CO" : true, "US-CT" : true, "US-DC" : true, "US-DE" : true,
- "US-FL" : true, "US-GA" : true, "US-GU" : true, "US-HI" : true, "US-IA" : true,
- "US-ID" : true, "US-IL" : true, "US-IN" : true, "US-KS" : true, "US-KY" : true,
- "US-LA" : true, "US-MA" : true, "US-MD" : true, "US-ME" : true, "US-MI" : true,
- "US-MN" : true, "US-MO" : true, "US-MP" : true, "US-MS" : true, "US-MT" : true,
- "US-NC" : true, "US-ND" : true, "US-NE" : true, "US-NH" : true, "US-NJ" : true,
- "US-NM" : true, "US-NV" : true, "US-NY" : true, "US-OH" : true, "US-OK" : true,
- "US-OR" : true, "US-PA" : true, "US-PR" : true, "US-RI" : true, "US-SC" : true,
- "US-SD" : true, "US-TN" : true, "US-TX" : true, "US-UM" : true, "US-UT" : true,
- "US-VA" : true, "US-VI" : true, "US-VT" : true, "US-WA" : true, "US-WI" : true,
- "US-WV" : true, "US-WY" : true, "UY-AR" : true, "UY-CA" : true, "UY-CL" : true,
- "UY-CO" : true, "UY-DU" : true, "UY-FD" : true, "UY-FS" : true, "UY-LA" : true,
- "UY-MA" : true, "UY-MO" : true, "UY-PA" : true, "UY-RN" : true, "UY-RO" : true,
- "UY-RV" : true, "UY-SA" : true, "UY-SJ" : true, "UY-SO" : true, "UY-TA" : true,
- "UY-TT" : true, "UZ-AN" : true, "UZ-BU" : true, "UZ-FA" : true, "UZ-JI" : true,
- "UZ-NG" : true, "UZ-NW" : true, "UZ-QA" : true, "UZ-QR" : true, "UZ-SA" : true,
- "UZ-SI" : true, "UZ-SU" : true, "UZ-TK" : true, "UZ-TO" : true, "UZ-XO" : true,
- "VC-01" : true, "VC-02" : true, "VC-03" : true, "VC-04" : true, "VC-05" : true,
- "VC-06" : true, "VE-A" : true, "VE-B" : true, "VE-C" : true, "VE-D" : true,
- "VE-E" : true, "VE-F" : true, "VE-G" : true, "VE-H" : true, "VE-I" : true,
- "VE-J" : true, "VE-K" : true, "VE-L" : true, "VE-M" : true, "VE-N" : true,
- "VE-O" : true, "VE-P" : true, "VE-R" : true, "VE-S" : true, "VE-T" : true,
- "VE-U" : true, "VE-V" : true, "VE-W" : true, "VE-X" : true, "VE-Y" : true,
- "VE-Z" : true, "VN-01" : true, "VN-02" : true, "VN-03" : true, "VN-04" : true,
- "VN-05" : true, "VN-06" : true, "VN-07" : true, "VN-09" : true, "VN-13" : true,
- "VN-14" : true, "VN-15" : true, "VN-18" : true, "VN-20" : true, "VN-21" : true,
- "VN-22" : true, "VN-23" : true, "VN-24" : true, "VN-25" : true, "VN-26" : true,
- "VN-27" : true, "VN-28" : true, "VN-29" : true, "VN-30" : true, "VN-31" : true,
- "VN-32" : true, "VN-33" : true, "VN-34" : true, "VN-35" : true, "VN-36" : true,
- "VN-37" : true, "VN-39" : true, "VN-40" : true, "VN-41" : true, "VN-43" : true,
- "VN-44" : true, "VN-45" : true, "VN-46" : true, "VN-47" : true, "VN-49" : true,
- "VN-50" : true, "VN-51" : true, "VN-52" : true, "VN-53" : true, "VN-54" : true,
- "VN-55" : true, "VN-56" : true, "VN-57" : true, "VN-58" : true, "VN-59" : true,
- "VN-61" : true, "VN-63" : true, "VN-66" : true, "VN-67" : true, "VN-68" : true,
- "VN-69" : true, "VN-70" : true, "VN-71" : true, "VN-72" : true, "VN-73" : true,
- "VN-CT" : true, "VN-DN" : true, "VN-HN" : true, "VN-HP" : true, "VN-SG" : true,
- "VU-MAP" : true, "VU-PAM" : true, "VU-SAM" : true, "VU-SEE" : true, "VU-TAE" : true,
- "VU-TOB" : true, "WS-AA" : true, "WS-AL" : true, "WS-AT" : true, "WS-FA" : true,
- "WS-GE" : true, "WS-GI" : true, "WS-PA" : true, "WS-SA" : true, "WS-TU" : true,
- "WS-VF" : true, "WS-VS" : true, "YE-AB" : true, "YE-AD" : true, "YE-AM" : true,
- "YE-BA" : true, "YE-DA" : true, "YE-DH" : true, "YE-HD" : true, "YE-HJ" : true,
- "YE-IB" : true, "YE-JA" : true, "YE-LA" : true, "YE-MA" : true, "YE-MR" : true,
- "YE-MU" : true, "YE-MW" : true, "YE-RA" : true, "YE-SD" : true, "YE-SH" : true,
- "YE-SN" : true, "YE-TA" : true, "ZA-EC" : true, "ZA-FS" : true, "ZA-GP" : true,
- "ZA-LP" : true, "ZA-MP" : true, "ZA-NC" : true, "ZA-NW" : true, "ZA-WC" : true,
- "ZA-ZN" : true, "ZM-01" : true, "ZM-02" : true, "ZM-03" : true, "ZM-04" : true,
- "ZM-05" : true, "ZM-06" : true, "ZM-07" : true, "ZM-08" : true, "ZM-09" : true,
- "ZW-BU" : true, "ZW-HA" : true, "ZW-MA" : true, "ZW-MC" : true, "ZW-ME" : true,
- "ZW-MI" : true, "ZW-MN" : true, "ZW-MS" : true, "ZW-MV" : true, "ZW-MW" : true,
+ "AD-02": true, "AD-03": true, "AD-04": true, "AD-05": true, "AD-06": true,
+ "AD-07": true, "AD-08": true, "AE-AJ": true, "AE-AZ": true, "AE-DU": true,
+ "AE-FU": true, "AE-RK": true, "AE-SH": true, "AE-UQ": true, "AF-BAL": true,
+ "AF-BAM": true, "AF-BDG": true, "AF-BDS": true, "AF-BGL": true, "AF-DAY": true,
+ "AF-FRA": true, "AF-FYB": true, "AF-GHA": true, "AF-GHO": true, "AF-HEL": true,
+ "AF-HER": true, "AF-JOW": true, "AF-KAB": true, "AF-KAN": true, "AF-KAP": true,
+ "AF-KDZ": true, "AF-KHO": true, "AF-KNR": true, "AF-LAG": true, "AF-LOG": true,
+ "AF-NAN": true, "AF-NIM": true, "AF-NUR": true, "AF-PAN": true, "AF-PAR": true,
+ "AF-PIA": true, "AF-PKA": true, "AF-SAM": true, "AF-SAR": true, "AF-TAK": true,
+ "AF-URU": true, "AF-WAR": true, "AF-ZAB": true, "AG-03": true, "AG-04": true,
+ "AG-05": true, "AG-06": true, "AG-07": true, "AG-08": true, "AG-10": true,
+ "AG-11": true, "AL-01": true, "AL-02": true, "AL-03": true, "AL-04": true,
+ "AL-05": true, "AL-06": true, "AL-07": true, "AL-08": true, "AL-09": true,
+ "AL-10": true, "AL-11": true, "AL-12": true, "AL-BR": true, "AL-BU": true,
+ "AL-DI": true, "AL-DL": true, "AL-DR": true, "AL-DV": true, "AL-EL": true,
+ "AL-ER": true, "AL-FR": true, "AL-GJ": true, "AL-GR": true, "AL-HA": true,
+ "AL-KA": true, "AL-KB": true, "AL-KC": true, "AL-KO": true, "AL-KR": true,
+ "AL-KU": true, "AL-LB": true, "AL-LE": true, "AL-LU": true, "AL-MK": true,
+ "AL-MM": true, "AL-MR": true, "AL-MT": true, "AL-PG": true, "AL-PQ": true,
+ "AL-PR": true, "AL-PU": true, "AL-SH": true, "AL-SK": true, "AL-SR": true,
+ "AL-TE": true, "AL-TP": true, "AL-TR": true, "AL-VL": true, "AM-AG": true,
+ "AM-AR": true, "AM-AV": true, "AM-ER": true, "AM-GR": true, "AM-KT": true,
+ "AM-LO": true, "AM-SH": true, "AM-SU": true, "AM-TV": true, "AM-VD": true,
+ "AO-BGO": true, "AO-BGU": true, "AO-BIE": true, "AO-CAB": true, "AO-CCU": true,
+ "AO-CNN": true, "AO-CNO": true, "AO-CUS": true, "AO-HUA": true, "AO-HUI": true,
+ "AO-LNO": true, "AO-LSU": true, "AO-LUA": true, "AO-MAL": true, "AO-MOX": true,
+ "AO-NAM": true, "AO-UIG": true, "AO-ZAI": true, "AR-A": true, "AR-B": true,
+ "AR-C": true, "AR-D": true, "AR-E": true, "AR-F": true, "AR-G": true, "AR-H": true,
+ "AR-J": true, "AR-K": true, "AR-L": true, "AR-M": true, "AR-N": true,
+ "AR-P": true, "AR-Q": true, "AR-R": true, "AR-S": true, "AR-T": true,
+ "AR-U": true, "AR-V": true, "AR-W": true, "AR-X": true, "AR-Y": true,
+ "AR-Z": true, "AT-1": true, "AT-2": true, "AT-3": true, "AT-4": true,
+ "AT-5": true, "AT-6": true, "AT-7": true, "AT-8": true, "AT-9": true,
+ "AU-ACT": true, "AU-NSW": true, "AU-NT": true, "AU-QLD": true, "AU-SA": true,
+ "AU-TAS": true, "AU-VIC": true, "AU-WA": true, "AZ-ABS": true, "AZ-AGA": true,
+ "AZ-AGC": true, "AZ-AGM": true, "AZ-AGS": true, "AZ-AGU": true, "AZ-AST": true,
+ "AZ-BA": true, "AZ-BAB": true, "AZ-BAL": true, "AZ-BAR": true, "AZ-BEY": true,
+ "AZ-BIL": true, "AZ-CAB": true, "AZ-CAL": true, "AZ-CUL": true, "AZ-DAS": true,
+ "AZ-FUZ": true, "AZ-GA": true, "AZ-GAD": true, "AZ-GOR": true, "AZ-GOY": true,
+ "AZ-GYG": true, "AZ-HAC": true, "AZ-IMI": true, "AZ-ISM": true, "AZ-KAL": true,
+ "AZ-KAN": true, "AZ-KUR": true, "AZ-LA": true, "AZ-LAC": true, "AZ-LAN": true,
+ "AZ-LER": true, "AZ-MAS": true, "AZ-MI": true, "AZ-NA": true, "AZ-NEF": true,
+ "AZ-NV": true, "AZ-NX": true, "AZ-OGU": true, "AZ-ORD": true, "AZ-QAB": true,
+ "AZ-QAX": true, "AZ-QAZ": true, "AZ-QBA": true, "AZ-QBI": true, "AZ-QOB": true,
+ "AZ-QUS": true, "AZ-SA": true, "AZ-SAB": true, "AZ-SAD": true, "AZ-SAH": true,
+ "AZ-SAK": true, "AZ-SAL": true, "AZ-SAR": true, "AZ-SAT": true, "AZ-SBN": true,
+ "AZ-SIY": true, "AZ-SKR": true, "AZ-SM": true, "AZ-SMI": true, "AZ-SMX": true,
+ "AZ-SR": true, "AZ-SUS": true, "AZ-TAR": true, "AZ-TOV": true, "AZ-UCA": true,
+ "AZ-XA": true, "AZ-XAC": true, "AZ-XCI": true, "AZ-XIZ": true, "AZ-XVD": true,
+ "AZ-YAR": true, "AZ-YE": true, "AZ-YEV": true, "AZ-ZAN": true, "AZ-ZAQ": true,
+ "AZ-ZAR": true, "BA-01": true, "BA-02": true, "BA-03": true, "BA-04": true,
+ "BA-05": true, "BA-06": true, "BA-07": true, "BA-08": true, "BA-09": true,
+ "BA-10": true, "BA-BIH": true, "BA-BRC": true, "BA-SRP": true, "BB-01": true,
+ "BB-02": true, "BB-03": true, "BB-04": true, "BB-05": true, "BB-06": true,
+ "BB-07": true, "BB-08": true, "BB-09": true, "BB-10": true, "BB-11": true,
+ "BD-01": true, "BD-02": true, "BD-03": true, "BD-04": true, "BD-05": true,
+ "BD-06": true, "BD-07": true, "BD-08": true, "BD-09": true, "BD-10": true,
+ "BD-11": true, "BD-12": true, "BD-13": true, "BD-14": true, "BD-15": true,
+ "BD-16": true, "BD-17": true, "BD-18": true, "BD-19": true, "BD-20": true,
+ "BD-21": true, "BD-22": true, "BD-23": true, "BD-24": true, "BD-25": true,
+ "BD-26": true, "BD-27": true, "BD-28": true, "BD-29": true, "BD-30": true,
+ "BD-31": true, "BD-32": true, "BD-33": true, "BD-34": true, "BD-35": true,
+ "BD-36": true, "BD-37": true, "BD-38": true, "BD-39": true, "BD-40": true,
+ "BD-41": true, "BD-42": true, "BD-43": true, "BD-44": true, "BD-45": true,
+ "BD-46": true, "BD-47": true, "BD-48": true, "BD-49": true, "BD-50": true,
+ "BD-51": true, "BD-52": true, "BD-53": true, "BD-54": true, "BD-55": true,
+ "BD-56": true, "BD-57": true, "BD-58": true, "BD-59": true, "BD-60": true,
+ "BD-61": true, "BD-62": true, "BD-63": true, "BD-64": true, "BD-A": true,
+ "BD-B": true, "BD-C": true, "BD-D": true, "BD-E": true, "BD-F": true,
+ "BD-G": true, "BE-BRU": true, "BE-VAN": true, "BE-VBR": true, "BE-VLG": true,
+ "BE-VLI": true, "BE-VOV": true, "BE-VWV": true, "BE-WAL": true, "BE-WBR": true,
+ "BE-WHT": true, "BE-WLG": true, "BE-WLX": true, "BE-WNA": true, "BF-01": true,
+ "BF-02": true, "BF-03": true, "BF-04": true, "BF-05": true, "BF-06": true,
+ "BF-07": true, "BF-08": true, "BF-09": true, "BF-10": true, "BF-11": true,
+ "BF-12": true, "BF-13": true, "BF-BAL": true, "BF-BAM": true, "BF-BAN": true,
+ "BF-BAZ": true, "BF-BGR": true, "BF-BLG": true, "BF-BLK": true, "BF-COM": true,
+ "BF-GAN": true, "BF-GNA": true, "BF-GOU": true, "BF-HOU": true, "BF-IOB": true,
+ "BF-KAD": true, "BF-KEN": true, "BF-KMD": true, "BF-KMP": true, "BF-KOP": true,
+ "BF-KOS": true, "BF-KOT": true, "BF-KOW": true, "BF-LER": true, "BF-LOR": true,
+ "BF-MOU": true, "BF-NAM": true, "BF-NAO": true, "BF-NAY": true, "BF-NOU": true,
+ "BF-OUB": true, "BF-OUD": true, "BF-PAS": true, "BF-PON": true, "BF-SEN": true,
+ "BF-SIS": true, "BF-SMT": true, "BF-SNG": true, "BF-SOM": true, "BF-SOR": true,
+ "BF-TAP": true, "BF-TUI": true, "BF-YAG": true, "BF-YAT": true, "BF-ZIR": true,
+ "BF-ZON": true, "BF-ZOU": true, "BG-01": true, "BG-02": true, "BG-03": true,
+ "BG-04": true, "BG-05": true, "BG-06": true, "BG-07": true, "BG-08": true,
+ "BG-09": true, "BG-10": true, "BG-11": true, "BG-12": true, "BG-13": true,
+ "BG-14": true, "BG-15": true, "BG-16": true, "BG-17": true, "BG-18": true,
+ "BG-19": true, "BG-20": true, "BG-21": true, "BG-22": true, "BG-23": true,
+ "BG-24": true, "BG-25": true, "BG-26": true, "BG-27": true, "BG-28": true,
+ "BH-13": true, "BH-14": true, "BH-15": true, "BH-16": true, "BH-17": true,
+ "BI-BB": true, "BI-BL": true, "BI-BM": true, "BI-BR": true, "BI-CA": true,
+ "BI-CI": true, "BI-GI": true, "BI-KI": true, "BI-KR": true, "BI-KY": true,
+ "BI-MA": true, "BI-MU": true, "BI-MW": true, "BI-NG": true, "BI-RM": true, "BI-RT": true,
+ "BI-RY": true, "BJ-AK": true, "BJ-AL": true, "BJ-AQ": true, "BJ-BO": true,
+ "BJ-CO": true, "BJ-DO": true, "BJ-KO": true, "BJ-LI": true, "BJ-MO": true,
+ "BJ-OU": true, "BJ-PL": true, "BJ-ZO": true, "BN-BE": true, "BN-BM": true,
+ "BN-TE": true, "BN-TU": true, "BO-B": true, "BO-C": true, "BO-H": true,
+ "BO-L": true, "BO-N": true, "BO-O": true, "BO-P": true, "BO-S": true,
+ "BO-T": true, "BQ-BO": true, "BQ-SA": true, "BQ-SE": true, "BR-AC": true,
+ "BR-AL": true, "BR-AM": true, "BR-AP": true, "BR-BA": true, "BR-CE": true,
+ "BR-DF": true, "BR-ES": true, "BR-FN": true, "BR-GO": true, "BR-MA": true,
+ "BR-MG": true, "BR-MS": true, "BR-MT": true, "BR-PA": true, "BR-PB": true,
+ "BR-PE": true, "BR-PI": true, "BR-PR": true, "BR-RJ": true, "BR-RN": true,
+ "BR-RO": true, "BR-RR": true, "BR-RS": true, "BR-SC": true, "BR-SE": true,
+ "BR-SP": true, "BR-TO": true, "BS-AK": true, "BS-BI": true, "BS-BP": true,
+ "BS-BY": true, "BS-CE": true, "BS-CI": true, "BS-CK": true, "BS-CO": true,
+ "BS-CS": true, "BS-EG": true, "BS-EX": true, "BS-FP": true, "BS-GC": true,
+ "BS-HI": true, "BS-HT": true, "BS-IN": true, "BS-LI": true, "BS-MC": true,
+ "BS-MG": true, "BS-MI": true, "BS-NE": true, "BS-NO": true, "BS-NP": true, "BS-NS": true,
+ "BS-RC": true, "BS-RI": true, "BS-SA": true, "BS-SE": true, "BS-SO": true,
+ "BS-SS": true, "BS-SW": true, "BS-WG": true, "BT-11": true, "BT-12": true,
+ "BT-13": true, "BT-14": true, "BT-15": true, "BT-21": true, "BT-22": true,
+ "BT-23": true, "BT-24": true, "BT-31": true, "BT-32": true, "BT-33": true,
+ "BT-34": true, "BT-41": true, "BT-42": true, "BT-43": true, "BT-44": true,
+ "BT-45": true, "BT-GA": true, "BT-TY": true, "BW-CE": true, "BW-CH": true, "BW-GH": true,
+ "BW-KG": true, "BW-KL": true, "BW-KW": true, "BW-NE": true, "BW-NW": true,
+ "BW-SE": true, "BW-SO": true, "BY-BR": true, "BY-HM": true, "BY-HO": true,
+ "BY-HR": true, "BY-MA": true, "BY-MI": true, "BY-VI": true, "BZ-BZ": true,
+ "BZ-CY": true, "BZ-CZL": true, "BZ-OW": true, "BZ-SC": true, "BZ-TOL": true,
+ "CA-AB": true, "CA-BC": true, "CA-MB": true, "CA-NB": true, "CA-NL": true,
+ "CA-NS": true, "CA-NT": true, "CA-NU": true, "CA-ON": true, "CA-PE": true,
+ "CA-QC": true, "CA-SK": true, "CA-YT": true, "CD-BC": true, "CD-BN": true,
+ "CD-EQ": true, "CD-HK": true, "CD-IT": true, "CD-KA": true, "CD-KC": true, "CD-KE": true, "CD-KG": true, "CD-KN": true,
+ "CD-KW": true, "CD-KS": true, "CD-LU": true, "CD-MA": true, "CD-NK": true, "CD-OR": true, "CD-SA": true, "CD-SK": true,
+ "CD-TA": true, "CD-TO": true, "CF-AC": true, "CF-BB": true, "CF-BGF": true, "CF-BK": true, "CF-HK": true, "CF-HM": true,
+ "CF-HS": true, "CF-KB": true, "CF-KG": true, "CF-LB": true, "CF-MB": true,
+ "CF-MP": true, "CF-NM": true, "CF-OP": true, "CF-SE": true, "CF-UK": true,
+ "CF-VK": true, "CG-11": true, "CG-12": true, "CG-13": true, "CG-14": true,
+ "CG-15": true, "CG-16": true, "CG-2": true, "CG-5": true, "CG-7": true, "CG-8": true,
+ "CG-9": true, "CG-BZV": true, "CH-AG": true, "CH-AI": true, "CH-AR": true,
+ "CH-BE": true, "CH-BL": true, "CH-BS": true, "CH-FR": true, "CH-GE": true,
+ "CH-GL": true, "CH-GR": true, "CH-JU": true, "CH-LU": true, "CH-NE": true,
+ "CH-NW": true, "CH-OW": true, "CH-SG": true, "CH-SH": true, "CH-SO": true,
+ "CH-SZ": true, "CH-TG": true, "CH-TI": true, "CH-UR": true, "CH-VD": true,
+ "CH-VS": true, "CH-ZG": true, "CH-ZH": true, "CI-AB": true, "CI-BS": true,
+ "CI-CM": true, "CI-DN": true, "CI-GD": true, "CI-LC": true, "CI-LG": true,
+ "CI-MG": true, "CI-SM": true, "CI-SV": true, "CI-VB": true, "CI-WR": true,
+ "CI-YM": true, "CI-ZZ": true, "CL-AI": true, "CL-AN": true, "CL-AP": true,
+ "CL-AR": true, "CL-AT": true, "CL-BI": true, "CL-CO": true, "CL-LI": true,
+ "CL-LL": true, "CL-LR": true, "CL-MA": true, "CL-ML": true, "CL-NB": true, "CL-RM": true,
+ "CL-TA": true, "CL-VS": true, "CM-AD": true, "CM-CE": true, "CM-EN": true,
+ "CM-ES": true, "CM-LT": true, "CM-NO": true, "CM-NW": true, "CM-OU": true,
+ "CM-SU": true, "CM-SW": true, "CN-AH": true, "CN-BJ": true, "CN-CQ": true,
+ "CN-FJ": true, "CN-GS": true, "CN-GD": true, "CN-GX": true, "CN-GZ": true,
+ "CN-HI": true, "CN-HE": true, "CN-HL": true, "CN-HA": true, "CN-HB": true,
+ "CN-HN": true, "CN-JS": true, "CN-JX": true, "CN-JL": true, "CN-LN": true,
+ "CN-NM": true, "CN-NX": true, "CN-QH": true, "CN-SN": true, "CN-SD": true, "CN-SH": true,
+ "CN-SX": true, "CN-SC": true, "CN-TJ": true, "CN-XJ": true, "CN-XZ": true, "CN-YN": true,
+ "CN-ZJ": true, "CO-AMA": true, "CO-ANT": true, "CO-ARA": true, "CO-ATL": true,
+ "CO-BOL": true, "CO-BOY": true, "CO-CAL": true, "CO-CAQ": true, "CO-CAS": true,
+ "CO-CAU": true, "CO-CES": true, "CO-CHO": true, "CO-COR": true, "CO-CUN": true,
+ "CO-DC": true, "CO-GUA": true, "CO-GUV": true, "CO-HUI": true, "CO-LAG": true,
+ "CO-MAG": true, "CO-MET": true, "CO-NAR": true, "CO-NSA": true, "CO-PUT": true,
+ "CO-QUI": true, "CO-RIS": true, "CO-SAN": true, "CO-SAP": true, "CO-SUC": true,
+ "CO-TOL": true, "CO-VAC": true, "CO-VAU": true, "CO-VID": true, "CR-A": true,
+ "CR-C": true, "CR-G": true, "CR-H": true, "CR-L": true, "CR-P": true,
+ "CR-SJ": true, "CU-01": true, "CU-02": true, "CU-03": true, "CU-04": true,
+ "CU-05": true, "CU-06": true, "CU-07": true, "CU-08": true, "CU-09": true,
+ "CU-10": true, "CU-11": true, "CU-12": true, "CU-13": true, "CU-14": true, "CU-15": true,
+ "CU-16": true, "CU-99": true, "CV-B": true, "CV-BR": true, "CV-BV": true, "CV-CA": true,
+ "CV-CF": true, "CV-CR": true, "CV-MA": true, "CV-MO": true, "CV-PA": true,
+ "CV-PN": true, "CV-PR": true, "CV-RB": true, "CV-RG": true, "CV-RS": true,
+ "CV-S": true, "CV-SD": true, "CV-SF": true, "CV-SL": true, "CV-SM": true,
+ "CV-SO": true, "CV-SS": true, "CV-SV": true, "CV-TA": true, "CV-TS": true,
+ "CY-01": true, "CY-02": true, "CY-03": true, "CY-04": true, "CY-05": true,
+ "CY-06": true, "CZ-10": true, "CZ-101": true, "CZ-102": true, "CZ-103": true,
+ "CZ-104": true, "CZ-105": true, "CZ-106": true, "CZ-107": true, "CZ-108": true,
+ "CZ-109": true, "CZ-110": true, "CZ-111": true, "CZ-112": true, "CZ-113": true,
+ "CZ-114": true, "CZ-115": true, "CZ-116": true, "CZ-117": true, "CZ-118": true,
+ "CZ-119": true, "CZ-120": true, "CZ-121": true, "CZ-122": true, "CZ-20": true,
+ "CZ-201": true, "CZ-202": true, "CZ-203": true, "CZ-204": true, "CZ-205": true,
+ "CZ-206": true, "CZ-207": true, "CZ-208": true, "CZ-209": true, "CZ-20A": true,
+ "CZ-20B": true, "CZ-20C": true, "CZ-31": true, "CZ-311": true, "CZ-312": true,
+ "CZ-313": true, "CZ-314": true, "CZ-315": true, "CZ-316": true, "CZ-317": true,
+ "CZ-32": true, "CZ-321": true, "CZ-322": true, "CZ-323": true, "CZ-324": true,
+ "CZ-325": true, "CZ-326": true, "CZ-327": true, "CZ-41": true, "CZ-411": true,
+ "CZ-412": true, "CZ-413": true, "CZ-42": true, "CZ-421": true, "CZ-422": true,
+ "CZ-423": true, "CZ-424": true, "CZ-425": true, "CZ-426": true, "CZ-427": true,
+ "CZ-51": true, "CZ-511": true, "CZ-512": true, "CZ-513": true, "CZ-514": true,
+ "CZ-52": true, "CZ-521": true, "CZ-522": true, "CZ-523": true, "CZ-524": true,
+ "CZ-525": true, "CZ-53": true, "CZ-531": true, "CZ-532": true, "CZ-533": true,
+ "CZ-534": true, "CZ-63": true, "CZ-631": true, "CZ-632": true, "CZ-633": true,
+ "CZ-634": true, "CZ-635": true, "CZ-64": true, "CZ-641": true, "CZ-642": true,
+ "CZ-643": true, "CZ-644": true, "CZ-645": true, "CZ-646": true, "CZ-647": true,
+ "CZ-71": true, "CZ-711": true, "CZ-712": true, "CZ-713": true, "CZ-714": true,
+ "CZ-715": true, "CZ-72": true, "CZ-721": true, "CZ-722": true, "CZ-723": true,
+ "CZ-724": true, "CZ-80": true, "CZ-801": true, "CZ-802": true, "CZ-803": true,
+ "CZ-804": true, "CZ-805": true, "CZ-806": true, "DE-BB": true, "DE-BE": true,
+ "DE-BW": true, "DE-BY": true, "DE-HB": true, "DE-HE": true, "DE-HH": true,
+ "DE-MV": true, "DE-NI": true, "DE-NW": true, "DE-RP": true, "DE-SH": true,
+ "DE-SL": true, "DE-SN": true, "DE-ST": true, "DE-TH": true, "DJ-AR": true,
+ "DJ-AS": true, "DJ-DI": true, "DJ-DJ": true, "DJ-OB": true, "DJ-TA": true,
+ "DK-81": true, "DK-82": true, "DK-83": true, "DK-84": true, "DK-85": true,
+ "DM-01": true, "DM-02": true, "DM-03": true, "DM-04": true, "DM-05": true,
+ "DM-06": true, "DM-07": true, "DM-08": true, "DM-09": true, "DM-10": true,
+ "DO-01": true, "DO-02": true, "DO-03": true, "DO-04": true, "DO-05": true,
+ "DO-06": true, "DO-07": true, "DO-08": true, "DO-09": true, "DO-10": true,
+ "DO-11": true, "DO-12": true, "DO-13": true, "DO-14": true, "DO-15": true,
+ "DO-16": true, "DO-17": true, "DO-18": true, "DO-19": true, "DO-20": true,
+ "DO-21": true, "DO-22": true, "DO-23": true, "DO-24": true, "DO-25": true,
+ "DO-26": true, "DO-27": true, "DO-28": true, "DO-29": true, "DO-30": true, "DO-31": true,
+ "DZ-01": true, "DZ-02": true, "DZ-03": true, "DZ-04": true, "DZ-05": true,
+ "DZ-06": true, "DZ-07": true, "DZ-08": true, "DZ-09": true, "DZ-10": true,
+ "DZ-11": true, "DZ-12": true, "DZ-13": true, "DZ-14": true, "DZ-15": true,
+ "DZ-16": true, "DZ-17": true, "DZ-18": true, "DZ-19": true, "DZ-20": true,
+ "DZ-21": true, "DZ-22": true, "DZ-23": true, "DZ-24": true, "DZ-25": true,
+ "DZ-26": true, "DZ-27": true, "DZ-28": true, "DZ-29": true, "DZ-30": true,
+ "DZ-31": true, "DZ-32": true, "DZ-33": true, "DZ-34": true, "DZ-35": true,
+ "DZ-36": true, "DZ-37": true, "DZ-38": true, "DZ-39": true, "DZ-40": true,
+ "DZ-41": true, "DZ-42": true, "DZ-43": true, "DZ-44": true, "DZ-45": true,
+ "DZ-46": true, "DZ-47": true, "DZ-48": true, "DZ-49": true, "DZ-51": true,
+ "DZ-53": true, "DZ-55": true, "DZ-56": true, "DZ-57": true, "EC-A": true, "EC-B": true,
+ "EC-C": true, "EC-D": true, "EC-E": true, "EC-F": true, "EC-G": true,
+ "EC-H": true, "EC-I": true, "EC-L": true, "EC-M": true, "EC-N": true,
+ "EC-O": true, "EC-P": true, "EC-R": true, "EC-S": true, "EC-SD": true,
+ "EC-SE": true, "EC-T": true, "EC-U": true, "EC-W": true, "EC-X": true,
+ "EC-Y": true, "EC-Z": true, "EE-37": true, "EE-39": true, "EE-44": true, "EE-45": true,
+ "EE-49": true, "EE-50": true, "EE-51": true, "EE-52": true, "EE-56": true, "EE-57": true,
+ "EE-59": true, "EE-60": true, "EE-64": true, "EE-65": true, "EE-67": true, "EE-68": true,
+ "EE-70": true, "EE-71": true, "EE-74": true, "EE-78": true, "EE-79": true, "EE-81": true, "EE-82": true,
+ "EE-84": true, "EE-86": true, "EE-87": true, "EG-ALX": true, "EG-ASN": true, "EG-AST": true,
+ "EG-BA": true, "EG-BH": true, "EG-BNS": true, "EG-C": true, "EG-DK": true,
+ "EG-DT": true, "EG-FYM": true, "EG-GH": true, "EG-GZ": true, "EG-HU": true,
+ "EG-IS": true, "EG-JS": true, "EG-KB": true, "EG-KFS": true, "EG-KN": true,
+ "EG-LX": true, "EG-MN": true, "EG-MNF": true, "EG-MT": true, "EG-PTS": true, "EG-SHG": true,
+ "EG-SHR": true, "EG-SIN": true, "EG-SU": true, "EG-SUZ": true, "EG-WAD": true,
+ "ER-AN": true, "ER-DK": true, "ER-DU": true, "ER-GB": true, "ER-MA": true,
+ "ER-SK": true, "ES-A": true, "ES-AB": true, "ES-AL": true, "ES-AN": true,
+ "ES-AR": true, "ES-AS": true, "ES-AV": true, "ES-B": true, "ES-BA": true,
+ "ES-BI": true, "ES-BU": true, "ES-C": true, "ES-CA": true, "ES-CB": true,
+ "ES-CC": true, "ES-CE": true, "ES-CL": true, "ES-CM": true, "ES-CN": true,
+ "ES-CO": true, "ES-CR": true, "ES-CS": true, "ES-CT": true, "ES-CU": true,
+ "ES-EX": true, "ES-GA": true, "ES-GC": true, "ES-GI": true, "ES-GR": true,
+ "ES-GU": true, "ES-H": true, "ES-HU": true, "ES-IB": true, "ES-J": true,
+ "ES-L": true, "ES-LE": true, "ES-LO": true, "ES-LU": true, "ES-M": true,
+ "ES-MA": true, "ES-MC": true, "ES-MD": true, "ES-ML": true, "ES-MU": true,
+ "ES-NA": true, "ES-NC": true, "ES-O": true, "ES-OR": true, "ES-P": true,
+ "ES-PM": true, "ES-PO": true, "ES-PV": true, "ES-RI": true, "ES-S": true,
+ "ES-SA": true, "ES-SE": true, "ES-SG": true, "ES-SO": true, "ES-SS": true,
+ "ES-T": true, "ES-TE": true, "ES-TF": true, "ES-TO": true, "ES-V": true,
+ "ES-VA": true, "ES-VC": true, "ES-VI": true, "ES-Z": true, "ES-ZA": true,
+ "ET-AA": true, "ET-AF": true, "ET-AM": true, "ET-BE": true, "ET-DD": true,
+ "ET-GA": true, "ET-HA": true, "ET-OR": true, "ET-SN": true, "ET-SO": true,
+ "ET-TI": true, "FI-01": true, "FI-02": true, "FI-03": true, "FI-04": true,
+ "FI-05": true, "FI-06": true, "FI-07": true, "FI-08": true, "FI-09": true,
+ "FI-10": true, "FI-11": true, "FI-12": true, "FI-13": true, "FI-14": true,
+ "FI-15": true, "FI-16": true, "FI-17": true, "FI-18": true, "FI-19": true,
+ "FJ-C": true, "FJ-E": true, "FJ-N": true, "FJ-R": true, "FJ-W": true,
+ "FM-KSA": true, "FM-PNI": true, "FM-TRK": true, "FM-YAP": true, "FR-01": true,
+ "FR-02": true, "FR-03": true, "FR-04": true, "FR-05": true, "FR-06": true,
+ "FR-07": true, "FR-08": true, "FR-09": true, "FR-10": true, "FR-11": true,
+ "FR-12": true, "FR-13": true, "FR-14": true, "FR-15": true, "FR-16": true,
+ "FR-17": true, "FR-18": true, "FR-19": true, "FR-20R": true, "FR-21": true, "FR-22": true,
+ "FR-23": true, "FR-24": true, "FR-25": true, "FR-26": true, "FR-27": true,
+ "FR-28": true, "FR-29": true, "FR-2A": true, "FR-2B": true, "FR-30": true,
+ "FR-31": true, "FR-32": true, "FR-33": true, "FR-34": true, "FR-35": true,
+ "FR-36": true, "FR-37": true, "FR-38": true, "FR-39": true, "FR-40": true,
+ "FR-41": true, "FR-42": true, "FR-43": true, "FR-44": true, "FR-45": true,
+ "FR-46": true, "FR-47": true, "FR-48": true, "FR-49": true, "FR-50": true,
+ "FR-51": true, "FR-52": true, "FR-53": true, "FR-54": true, "FR-55": true,
+ "FR-56": true, "FR-57": true, "FR-58": true, "FR-59": true, "FR-60": true,
+ "FR-61": true, "FR-62": true, "FR-63": true, "FR-64": true, "FR-65": true,
+ "FR-66": true, "FR-67": true, "FR-68": true, "FR-69": true, "FR-70": true,
+ "FR-71": true, "FR-72": true, "FR-73": true, "FR-74": true, "FR-75": true,
+ "FR-76": true, "FR-77": true, "FR-78": true, "FR-79": true, "FR-80": true,
+ "FR-81": true, "FR-82": true, "FR-83": true, "FR-84": true, "FR-85": true,
+ "FR-86": true, "FR-87": true, "FR-88": true, "FR-89": true, "FR-90": true,
+ "FR-91": true, "FR-92": true, "FR-93": true, "FR-94": true, "FR-95": true,
+ "FR-ARA": true, "FR-BFC": true, "FR-BL": true, "FR-BRE": true, "FR-COR": true,
+ "FR-CP": true, "FR-CVL": true, "FR-GES": true, "FR-GF": true, "FR-GP": true,
+ "FR-GUA": true, "FR-HDF": true, "FR-IDF": true, "FR-LRE": true, "FR-MAY": true,
+ "FR-MF": true, "FR-MQ": true, "FR-NAQ": true, "FR-NC": true, "FR-NOR": true,
+ "FR-OCC": true, "FR-PAC": true, "FR-PDL": true, "FR-PF": true, "FR-PM": true,
+ "FR-RE": true, "FR-TF": true, "FR-WF": true, "FR-YT": true, "GA-1": true,
+ "GA-2": true, "GA-3": true, "GA-4": true, "GA-5": true, "GA-6": true,
+ "GA-7": true, "GA-8": true, "GA-9": true, "GB-ABC": true, "GB-ABD": true,
+ "GB-ABE": true, "GB-AGB": true, "GB-AGY": true, "GB-AND": true, "GB-ANN": true,
+ "GB-ANS": true, "GB-BAS": true, "GB-BBD": true, "GB-BDF": true, "GB-BDG": true,
+ "GB-BEN": true, "GB-BEX": true, "GB-BFS": true, "GB-BGE": true, "GB-BGW": true,
+ "GB-BIR": true, "GB-BKM": true, "GB-BMH": true, "GB-BNE": true, "GB-BNH": true,
+ "GB-BNS": true, "GB-BOL": true, "GB-BPL": true, "GB-BRC": true, "GB-BRD": true,
+ "GB-BRY": true, "GB-BST": true, "GB-BUR": true, "GB-CAM": true, "GB-CAY": true,
+ "GB-CBF": true, "GB-CCG": true, "GB-CGN": true, "GB-CHE": true, "GB-CHW": true,
+ "GB-CLD": true, "GB-CLK": true, "GB-CMA": true, "GB-CMD": true, "GB-CMN": true,
+ "GB-CON": true, "GB-COV": true, "GB-CRF": true, "GB-CRY": true, "GB-CWY": true,
+ "GB-DAL": true, "GB-DBY": true, "GB-DEN": true, "GB-DER": true, "GB-DEV": true,
+ "GB-DGY": true, "GB-DNC": true, "GB-DND": true, "GB-DOR": true, "GB-DRS": true,
+ "GB-DUD": true, "GB-DUR": true, "GB-EAL": true, "GB-EAW": true, "GB-EAY": true,
+ "GB-EDH": true, "GB-EDU": true, "GB-ELN": true, "GB-ELS": true, "GB-ENF": true,
+ "GB-ENG": true, "GB-ERW": true, "GB-ERY": true, "GB-ESS": true, "GB-ESX": true,
+ "GB-FAL": true, "GB-FIF": true, "GB-FLN": true, "GB-FMO": true, "GB-GAT": true,
+ "GB-GBN": true, "GB-GLG": true, "GB-GLS": true, "GB-GRE": true, "GB-GWN": true,
+ "GB-HAL": true, "GB-HAM": true, "GB-HAV": true, "GB-HCK": true, "GB-HEF": true,
+ "GB-HIL": true, "GB-HLD": true, "GB-HMF": true, "GB-HNS": true, "GB-HPL": true,
+ "GB-HRT": true, "GB-HRW": true, "GB-HRY": true, "GB-IOS": true, "GB-IOW": true,
+ "GB-ISL": true, "GB-IVC": true, "GB-KEC": true, "GB-KEN": true, "GB-KHL": true,
+ "GB-KIR": true, "GB-KTT": true, "GB-KWL": true, "GB-LAN": true, "GB-LBC": true,
+ "GB-LBH": true, "GB-LCE": true, "GB-LDS": true, "GB-LEC": true, "GB-LEW": true,
+ "GB-LIN": true, "GB-LIV": true, "GB-LND": true, "GB-LUT": true, "GB-MAN": true,
+ "GB-MDB": true, "GB-MDW": true, "GB-MEA": true, "GB-MIK": true, "GD-01": true,
+ "GB-MLN": true, "GB-MON": true, "GB-MRT": true, "GB-MRY": true, "GB-MTY": true,
+ "GB-MUL": true, "GB-NAY": true, "GB-NBL": true, "GB-NEL": true, "GB-NET": true,
+ "GB-NFK": true, "GB-NGM": true, "GB-NIR": true, "GB-NLK": true, "GB-NLN": true,
+ "GB-NMD": true, "GB-NSM": true, "GB-NTH": true, "GB-NTL": true, "GB-NTT": true,
+ "GB-NTY": true, "GB-NWM": true, "GB-NWP": true, "GB-NYK": true, "GB-OLD": true,
+ "GB-ORK": true, "GB-OXF": true, "GB-PEM": true, "GB-PKN": true, "GB-PLY": true,
+ "GB-POL": true, "GB-POR": true, "GB-POW": true, "GB-PTE": true, "GB-RCC": true,
+ "GB-RCH": true, "GB-RCT": true, "GB-RDB": true, "GB-RDG": true, "GB-RFW": true,
+ "GB-RIC": true, "GB-ROT": true, "GB-RUT": true, "GB-SAW": true, "GB-SAY": true,
+ "GB-SCB": true, "GB-SCT": true, "GB-SFK": true, "GB-SFT": true, "GB-SGC": true,
+ "GB-SHF": true, "GB-SHN": true, "GB-SHR": true, "GB-SKP": true, "GB-SLF": true,
+ "GB-SLG": true, "GB-SLK": true, "GB-SND": true, "GB-SOL": true, "GB-SOM": true,
+ "GB-SOS": true, "GB-SRY": true, "GB-STE": true, "GB-STG": true, "GB-STH": true,
+ "GB-STN": true, "GB-STS": true, "GB-STT": true, "GB-STY": true, "GB-SWA": true,
+ "GB-SWD": true, "GB-SWK": true, "GB-TAM": true, "GB-TFW": true, "GB-THR": true,
+ "GB-TOB": true, "GB-TOF": true, "GB-TRF": true, "GB-TWH": true, "GB-UKM": true,
+ "GB-VGL": true, "GB-WAR": true, "GB-WBK": true, "GB-WDU": true, "GB-WFT": true,
+ "GB-WGN": true, "GB-WIL": true, "GB-WKF": true, "GB-WLL": true, "GB-WLN": true,
+ "GB-WLS": true, "GB-WLV": true, "GB-WND": true, "GB-WNM": true, "GB-WOK": true,
+ "GB-WOR": true, "GB-WRL": true, "GB-WRT": true, "GB-WRX": true, "GB-WSM": true,
+ "GB-WSX": true, "GB-YOR": true, "GB-ZET": true, "GD-02": true, "GD-03": true,
+ "GD-04": true, "GD-05": true, "GD-06": true, "GD-10": true, "GE-AB": true,
+ "GE-AJ": true, "GE-GU": true, "GE-IM": true, "GE-KA": true, "GE-KK": true,
+ "GE-MM": true, "GE-RL": true, "GE-SJ": true, "GE-SK": true, "GE-SZ": true,
+ "GE-TB": true, "GH-AA": true, "GH-AH": true, "GH-AF": true, "GH-BA": true, "GH-BO": true, "GH-BE": true, "GH-CP": true,
+ "GH-EP": true, "GH-NP": true, "GH-TV": true, "GH-UE": true, "GH-UW": true,
+ "GH-WP": true, "GL-AV": true, "GL-KU": true, "GL-QA": true, "GL-QT": true, "GL-QE": true, "GL-SM": true,
+ "GM-B": true, "GM-L": true, "GM-M": true, "GM-N": true, "GM-U": true,
+ "GM-W": true, "GN-B": true, "GN-BE": true, "GN-BF": true, "GN-BK": true,
+ "GN-C": true, "GN-CO": true, "GN-D": true, "GN-DB": true, "GN-DI": true,
+ "GN-DL": true, "GN-DU": true, "GN-F": true, "GN-FA": true, "GN-FO": true,
+ "GN-FR": true, "GN-GA": true, "GN-GU": true, "GN-K": true, "GN-KA": true,
+ "GN-KB": true, "GN-KD": true, "GN-KE": true, "GN-KN": true, "GN-KO": true,
+ "GN-KS": true, "GN-L": true, "GN-LA": true, "GN-LE": true, "GN-LO": true,
+ "GN-M": true, "GN-MC": true, "GN-MD": true, "GN-ML": true, "GN-MM": true,
+ "GN-N": true, "GN-NZ": true, "GN-PI": true, "GN-SI": true, "GN-TE": true,
+ "GN-TO": true, "GN-YO": true, "GQ-AN": true, "GQ-BN": true, "GQ-BS": true,
+ "GQ-C": true, "GQ-CS": true, "GQ-I": true, "GQ-KN": true, "GQ-LI": true,
+ "GQ-WN": true, "GR-01": true, "GR-03": true, "GR-04": true, "GR-05": true,
+ "GR-06": true, "GR-07": true, "GR-11": true, "GR-12": true, "GR-13": true,
+ "GR-14": true, "GR-15": true, "GR-16": true, "GR-17": true, "GR-21": true,
+ "GR-22": true, "GR-23": true, "GR-24": true, "GR-31": true, "GR-32": true,
+ "GR-33": true, "GR-34": true, "GR-41": true, "GR-42": true, "GR-43": true,
+ "GR-44": true, "GR-51": true, "GR-52": true, "GR-53": true, "GR-54": true,
+ "GR-55": true, "GR-56": true, "GR-57": true, "GR-58": true, "GR-59": true,
+ "GR-61": true, "GR-62": true, "GR-63": true, "GR-64": true, "GR-69": true,
+ "GR-71": true, "GR-72": true, "GR-73": true, "GR-81": true, "GR-82": true,
+ "GR-83": true, "GR-84": true, "GR-85": true, "GR-91": true, "GR-92": true,
+ "GR-93": true, "GR-94": true, "GR-A": true, "GR-A1": true, "GR-B": true,
+ "GR-C": true, "GR-D": true, "GR-E": true, "GR-F": true, "GR-G": true,
+ "GR-H": true, "GR-I": true, "GR-J": true, "GR-K": true, "GR-L": true,
+ "GR-M": true, "GT-01": true, "GT-02": true, "GT-03": true, "GT-04": true,
+ "GT-05": true, "GT-06": true, "GT-07": true, "GT-08": true, "GT-09": true,
+ "GT-10": true, "GT-11": true, "GT-12": true, "GT-13": true, "GT-14": true,
+ "GT-15": true, "GT-16": true, "GT-17": true, "GT-18": true, "GT-19": true,
+ "GT-20": true, "GT-21": true, "GT-22": true, "GW-BA": true, "GW-BL": true,
+ "GW-BM": true, "GW-BS": true, "GW-CA": true, "GW-GA": true, "GW-L": true,
+ "GW-N": true, "GW-OI": true, "GW-QU": true, "GW-S": true, "GW-TO": true,
+ "GY-BA": true, "GY-CU": true, "GY-DE": true, "GY-EB": true, "GY-ES": true,
+ "GY-MA": true, "GY-PM": true, "GY-PT": true, "GY-UD": true, "GY-UT": true,
+ "HN-AT": true, "HN-CH": true, "HN-CL": true, "HN-CM": true, "HN-CP": true,
+ "HN-CR": true, "HN-EP": true, "HN-FM": true, "HN-GD": true, "HN-IB": true,
+ "HN-IN": true, "HN-LE": true, "HN-LP": true, "HN-OC": true, "HN-OL": true,
+ "HN-SB": true, "HN-VA": true, "HN-YO": true, "HR-01": true, "HR-02": true,
+ "HR-03": true, "HR-04": true, "HR-05": true, "HR-06": true, "HR-07": true,
+ "HR-08": true, "HR-09": true, "HR-10": true, "HR-11": true, "HR-12": true,
+ "HR-13": true, "HR-14": true, "HR-15": true, "HR-16": true, "HR-17": true,
+ "HR-18": true, "HR-19": true, "HR-20": true, "HR-21": true, "HT-AR": true,
+ "HT-CE": true, "HT-GA": true, "HT-ND": true, "HT-NE": true, "HT-NO": true, "HT-NI": true,
+ "HT-OU": true, "HT-SD": true, "HT-SE": true, "HU-BA": true, "HU-BC": true,
+ "HU-BE": true, "HU-BK": true, "HU-BU": true, "HU-BZ": true, "HU-CS": true,
+ "HU-DE": true, "HU-DU": true, "HU-EG": true, "HU-ER": true, "HU-FE": true,
+ "HU-GS": true, "HU-GY": true, "HU-HB": true, "HU-HE": true, "HU-HV": true,
+ "HU-JN": true, "HU-KE": true, "HU-KM": true, "HU-KV": true, "HU-MI": true,
+ "HU-NK": true, "HU-NO": true, "HU-NY": true, "HU-PE": true, "HU-PS": true,
+ "HU-SD": true, "HU-SF": true, "HU-SH": true, "HU-SK": true, "HU-SN": true,
+ "HU-SO": true, "HU-SS": true, "HU-ST": true, "HU-SZ": true, "HU-TB": true,
+ "HU-TO": true, "HU-VA": true, "HU-VE": true, "HU-VM": true, "HU-ZA": true,
+ "HU-ZE": true, "ID-AC": true, "ID-BA": true, "ID-BB": true, "ID-BE": true,
+ "ID-BT": true, "ID-GO": true, "ID-IJ": true, "ID-JA": true, "ID-JB": true,
+ "ID-JI": true, "ID-JK": true, "ID-JT": true, "ID-JW": true, "ID-KA": true,
+ "ID-KB": true, "ID-KI": true, "ID-KU": true, "ID-KR": true, "ID-KS": true,
+ "ID-KT": true, "ID-LA": true, "ID-MA": true, "ID-ML": true, "ID-MU": true,
+ "ID-NB": true, "ID-NT": true, "ID-NU": true, "ID-PA": true, "ID-PB": true,
+ "ID-PE": true, "ID-PP": true, "ID-PS": true, "ID-PT": true, "ID-RI": true,
+ "ID-SA": true, "ID-SB": true, "ID-SG": true, "ID-SL": true, "ID-SM": true,
+ "ID-SN": true, "ID-SR": true, "ID-SS": true, "ID-ST": true, "ID-SU": true,
+ "ID-YO": true, "IE-C": true, "IE-CE": true, "IE-CN": true, "IE-CO": true,
+ "IE-CW": true, "IE-D": true, "IE-DL": true, "IE-G": true, "IE-KE": true,
+ "IE-KK": true, "IE-KY": true, "IE-L": true, "IE-LD": true, "IE-LH": true,
+ "IE-LK": true, "IE-LM": true, "IE-LS": true, "IE-M": true, "IE-MH": true,
+ "IE-MN": true, "IE-MO": true, "IE-OY": true, "IE-RN": true, "IE-SO": true,
+ "IE-TA": true, "IE-U": true, "IE-WD": true, "IE-WH": true, "IE-WW": true,
+ "IE-WX": true, "IL-D": true, "IL-HA": true, "IL-JM": true, "IL-M": true,
+ "IL-TA": true, "IL-Z": true, "IN-AN": true, "IN-AP": true, "IN-AR": true,
+ "IN-AS": true, "IN-BR": true, "IN-CH": true, "IN-CT": true, "IN-DH": true,
+ "IN-DL": true, "IN-DN": true, "IN-GA": true, "IN-GJ": true, "IN-HP": true,
+ "IN-HR": true, "IN-JH": true, "IN-JK": true, "IN-KA": true, "IN-KL": true,
+ "IN-LD": true, "IN-MH": true, "IN-ML": true, "IN-MN": true, "IN-MP": true,
+ "IN-MZ": true, "IN-NL": true, "IN-TG": true, "IN-OR": true, "IN-PB": true, "IN-PY": true,
+ "IN-RJ": true, "IN-SK": true, "IN-TN": true, "IN-TR": true, "IN-UP": true,
+ "IN-UT": true, "IN-WB": true, "IQ-AN": true, "IQ-AR": true, "IQ-BA": true,
+ "IQ-BB": true, "IQ-BG": true, "IQ-DA": true, "IQ-DI": true, "IQ-DQ": true,
+ "IQ-KA": true, "IQ-KI": true, "IQ-MA": true, "IQ-MU": true, "IQ-NA": true, "IQ-NI": true,
+ "IQ-QA": true, "IQ-SD": true, "IQ-SW": true, "IQ-SU": true, "IQ-TS": true, "IQ-WA": true,
+ "IR-00": true, "IR-01": true, "IR-02": true, "IR-03": true, "IR-04": true, "IR-05": true,
+ "IR-06": true, "IR-07": true, "IR-08": true, "IR-09": true, "IR-10": true, "IR-11": true,
+ "IR-12": true, "IR-13": true, "IR-14": true, "IR-15": true, "IR-16": true,
+ "IR-17": true, "IR-18": true, "IR-19": true, "IR-20": true, "IR-21": true,
+ "IR-22": true, "IR-23": true, "IR-24": true, "IR-25": true, "IR-26": true,
+ "IR-27": true, "IR-28": true, "IR-29": true, "IR-30": true, "IR-31": true,
+ "IS-0": true, "IS-1": true, "IS-2": true, "IS-3": true, "IS-4": true,
+ "IS-5": true, "IS-6": true, "IS-7": true, "IS-8": true, "IT-21": true,
+ "IT-23": true, "IT-25": true, "IT-32": true, "IT-34": true, "IT-36": true,
+ "IT-42": true, "IT-45": true, "IT-52": true, "IT-55": true, "IT-57": true,
+ "IT-62": true, "IT-65": true, "IT-67": true, "IT-72": true, "IT-75": true,
+ "IT-77": true, "IT-78": true, "IT-82": true, "IT-88": true, "IT-AG": true,
+ "IT-AL": true, "IT-AN": true, "IT-AO": true, "IT-AP": true, "IT-AQ": true,
+ "IT-AR": true, "IT-AT": true, "IT-AV": true, "IT-BA": true, "IT-BG": true,
+ "IT-BI": true, "IT-BL": true, "IT-BN": true, "IT-BO": true, "IT-BR": true,
+ "IT-BS": true, "IT-BT": true, "IT-BZ": true, "IT-CA": true, "IT-CB": true,
+ "IT-CE": true, "IT-CH": true, "IT-CI": true, "IT-CL": true, "IT-CN": true,
+ "IT-CO": true, "IT-CR": true, "IT-CS": true, "IT-CT": true, "IT-CZ": true,
+ "IT-EN": true, "IT-FC": true, "IT-FE": true, "IT-FG": true, "IT-FI": true,
+ "IT-FM": true, "IT-FR": true, "IT-GE": true, "IT-GO": true, "IT-GR": true,
+ "IT-IM": true, "IT-IS": true, "IT-KR": true, "IT-LC": true, "IT-LE": true,
+ "IT-LI": true, "IT-LO": true, "IT-LT": true, "IT-LU": true, "IT-MB": true,
+ "IT-MC": true, "IT-ME": true, "IT-MI": true, "IT-MN": true, "IT-MO": true,
+ "IT-MS": true, "IT-MT": true, "IT-NA": true, "IT-NO": true, "IT-NU": true,
+ "IT-OG": true, "IT-OR": true, "IT-OT": true, "IT-PA": true, "IT-PC": true,
+ "IT-PD": true, "IT-PE": true, "IT-PG": true, "IT-PI": true, "IT-PN": true,
+ "IT-PO": true, "IT-PR": true, "IT-PT": true, "IT-PU": true, "IT-PV": true,
+ "IT-PZ": true, "IT-RA": true, "IT-RC": true, "IT-RE": true, "IT-RG": true,
+ "IT-RI": true, "IT-RM": true, "IT-RN": true, "IT-RO": true, "IT-SA": true,
+ "IT-SI": true, "IT-SO": true, "IT-SP": true, "IT-SR": true, "IT-SS": true,
+ "IT-SV": true, "IT-TA": true, "IT-TE": true, "IT-TN": true, "IT-TO": true,
+ "IT-TP": true, "IT-TR": true, "IT-TS": true, "IT-TV": true, "IT-UD": true,
+ "IT-VA": true, "IT-VB": true, "IT-VC": true, "IT-VE": true, "IT-VI": true,
+ "IT-VR": true, "IT-VS": true, "IT-VT": true, "IT-VV": true, "JM-01": true,
+ "JM-02": true, "JM-03": true, "JM-04": true, "JM-05": true, "JM-06": true,
+ "JM-07": true, "JM-08": true, "JM-09": true, "JM-10": true, "JM-11": true,
+ "JM-12": true, "JM-13": true, "JM-14": true, "JO-AJ": true, "JO-AM": true,
+ "JO-AQ": true, "JO-AT": true, "JO-AZ": true, "JO-BA": true, "JO-IR": true,
+ "JO-JA": true, "JO-KA": true, "JO-MA": true, "JO-MD": true, "JO-MN": true,
+ "JP-01": true, "JP-02": true, "JP-03": true, "JP-04": true, "JP-05": true,
+ "JP-06": true, "JP-07": true, "JP-08": true, "JP-09": true, "JP-10": true,
+ "JP-11": true, "JP-12": true, "JP-13": true, "JP-14": true, "JP-15": true,
+ "JP-16": true, "JP-17": true, "JP-18": true, "JP-19": true, "JP-20": true,
+ "JP-21": true, "JP-22": true, "JP-23": true, "JP-24": true, "JP-25": true,
+ "JP-26": true, "JP-27": true, "JP-28": true, "JP-29": true, "JP-30": true,
+ "JP-31": true, "JP-32": true, "JP-33": true, "JP-34": true, "JP-35": true,
+ "JP-36": true, "JP-37": true, "JP-38": true, "JP-39": true, "JP-40": true,
+ "JP-41": true, "JP-42": true, "JP-43": true, "JP-44": true, "JP-45": true,
+ "JP-46": true, "JP-47": true, "KE-01": true, "KE-02": true, "KE-03": true,
+ "KE-04": true, "KE-05": true, "KE-06": true, "KE-07": true, "KE-08": true,
+ "KE-09": true, "KE-10": true, "KE-11": true, "KE-12": true, "KE-13": true,
+ "KE-14": true, "KE-15": true, "KE-16": true, "KE-17": true, "KE-18": true,
+ "KE-19": true, "KE-20": true, "KE-21": true, "KE-22": true, "KE-23": true,
+ "KE-24": true, "KE-25": true, "KE-26": true, "KE-27": true, "KE-28": true,
+ "KE-29": true, "KE-30": true, "KE-31": true, "KE-32": true, "KE-33": true,
+ "KE-34": true, "KE-35": true, "KE-36": true, "KE-37": true, "KE-38": true,
+ "KE-39": true, "KE-40": true, "KE-41": true, "KE-42": true, "KE-43": true,
+ "KE-44": true, "KE-45": true, "KE-46": true, "KE-47": true, "KG-B": true,
+ "KG-C": true, "KG-GB": true, "KG-GO": true, "KG-J": true, "KG-N": true, "KG-O": true,
+ "KG-T": true, "KG-Y": true, "KH-1": true, "KH-10": true, "KH-11": true,
+ "KH-12": true, "KH-13": true, "KH-14": true, "KH-15": true, "KH-16": true,
+ "KH-17": true, "KH-18": true, "KH-19": true, "KH-2": true, "KH-20": true,
+ "KH-21": true, "KH-22": true, "KH-23": true, "KH-24": true, "KH-3": true,
+ "KH-4": true, "KH-5": true, "KH-6": true, "KH-7": true, "KH-8": true,
+ "KH-9": true, "KI-G": true, "KI-L": true, "KI-P": true, "KM-A": true,
+ "KM-G": true, "KM-M": true, "KN-01": true, "KN-02": true, "KN-03": true,
+ "KN-04": true, "KN-05": true, "KN-06": true, "KN-07": true, "KN-08": true,
+ "KN-09": true, "KN-10": true, "KN-11": true, "KN-12": true, "KN-13": true,
+ "KN-15": true, "KN-K": true, "KN-N": true, "KP-01": true, "KP-02": true,
+ "KP-03": true, "KP-04": true, "KP-05": true, "KP-06": true, "KP-07": true,
+ "KP-08": true, "KP-09": true, "KP-10": true, "KP-13": true, "KR-11": true,
+ "KR-26": true, "KR-27": true, "KR-28": true, "KR-29": true, "KR-30": true,
+ "KR-31": true, "KR-41": true, "KR-42": true, "KR-43": true, "KR-44": true,
+ "KR-45": true, "KR-46": true, "KR-47": true, "KR-48": true, "KR-49": true,
+ "KW-AH": true, "KW-FA": true, "KW-HA": true, "KW-JA": true, "KW-KU": true,
+ "KW-MU": true, "KZ-10": true, "KZ-75": true, "KZ-19": true, "KZ-11": true,
+ "KZ-15": true, "KZ-71": true, "KZ-23": true, "KZ-27": true, "KZ-47": true,
+ "KZ-55": true, "KZ-35": true, "KZ-39": true, "KZ-43": true, "KZ-63": true,
+ "KZ-79": true, "KZ-59": true, "KZ-61": true, "KZ-62": true, "KZ-31": true,
+ "KZ-33": true, "LA-AT": true, "LA-BK": true, "LA-BL": true,
+ "LA-CH": true, "LA-HO": true, "LA-KH": true, "LA-LM": true, "LA-LP": true,
+ "LA-OU": true, "LA-PH": true, "LA-SL": true, "LA-SV": true, "LA-VI": true,
+ "LA-VT": true, "LA-XA": true, "LA-XE": true, "LA-XI": true, "LA-XS": true,
+ "LB-AK": true, "LB-AS": true, "LB-BA": true, "LB-BH": true, "LB-BI": true,
+ "LB-JA": true, "LB-JL": true, "LB-NA": true, "LC-01": true, "LC-02": true,
+ "LC-03": true, "LC-05": true, "LC-06": true, "LC-07": true, "LC-08": true,
+ "LC-10": true, "LC-11": true, "LI-01": true, "LI-02": true,
+ "LI-03": true, "LI-04": true, "LI-05": true, "LI-06": true, "LI-07": true,
+ "LI-08": true, "LI-09": true, "LI-10": true, "LI-11": true, "LK-1": true,
+ "LK-11": true, "LK-12": true, "LK-13": true, "LK-2": true, "LK-21": true,
+ "LK-22": true, "LK-23": true, "LK-3": true, "LK-31": true, "LK-32": true,
+ "LK-33": true, "LK-4": true, "LK-41": true, "LK-42": true, "LK-43": true,
+ "LK-44": true, "LK-45": true, "LK-5": true, "LK-51": true, "LK-52": true,
+ "LK-53": true, "LK-6": true, "LK-61": true, "LK-62": true, "LK-7": true,
+ "LK-71": true, "LK-72": true, "LK-8": true, "LK-81": true, "LK-82": true,
+ "LK-9": true, "LK-91": true, "LK-92": true, "LR-BG": true, "LR-BM": true,
+ "LR-CM": true, "LR-GB": true, "LR-GG": true, "LR-GK": true, "LR-LO": true,
+ "LR-MG": true, "LR-MO": true, "LR-MY": true, "LR-NI": true, "LR-RI": true,
+ "LR-SI": true, "LS-A": true, "LS-B": true, "LS-C": true, "LS-D": true,
+ "LS-E": true, "LS-F": true, "LS-G": true, "LS-H": true, "LS-J": true,
+ "LS-K": true, "LT-AL": true, "LT-KL": true, "LT-KU": true, "LT-MR": true,
+ "LT-PN": true, "LT-SA": true, "LT-TA": true, "LT-TE": true, "LT-UT": true,
+ "LT-VL": true, "LU-CA": true, "LU-CL": true, "LU-DI": true, "LU-EC": true,
+ "LU-ES": true, "LU-GR": true, "LU-LU": true, "LU-ME": true, "LU-RD": true,
+ "LU-RM": true, "LU-VD": true, "LU-WI": true, "LU-D": true, "LU-G": true, "LU-L": true,
+ "LV-001": true, "LV-111": true, "LV-112": true, "LV-113": true,
+ "LV-002": true, "LV-003": true, "LV-004": true, "LV-005": true, "LV-006": true,
+ "LV-007": true, "LV-008": true, "LV-009": true, "LV-010": true, "LV-011": true,
+ "LV-012": true, "LV-013": true, "LV-014": true, "LV-015": true, "LV-016": true,
+ "LV-017": true, "LV-018": true, "LV-019": true, "LV-020": true, "LV-021": true,
+ "LV-022": true, "LV-023": true, "LV-024": true, "LV-025": true, "LV-026": true,
+ "LV-027": true, "LV-028": true, "LV-029": true, "LV-030": true, "LV-031": true,
+ "LV-032": true, "LV-033": true, "LV-034": true, "LV-035": true, "LV-036": true,
+ "LV-037": true, "LV-038": true, "LV-039": true, "LV-040": true, "LV-041": true,
+ "LV-042": true, "LV-043": true, "LV-044": true, "LV-045": true, "LV-046": true,
+ "LV-047": true, "LV-048": true, "LV-049": true, "LV-050": true, "LV-051": true,
+ "LV-052": true, "LV-053": true, "LV-054": true, "LV-055": true, "LV-056": true,
+ "LV-057": true, "LV-058": true, "LV-059": true, "LV-060": true, "LV-061": true,
+ "LV-062": true, "LV-063": true, "LV-064": true, "LV-065": true, "LV-066": true,
+ "LV-067": true, "LV-068": true, "LV-069": true, "LV-070": true, "LV-071": true,
+ "LV-072": true, "LV-073": true, "LV-074": true, "LV-075": true, "LV-076": true,
+ "LV-077": true, "LV-078": true, "LV-079": true, "LV-080": true, "LV-081": true,
+ "LV-082": true, "LV-083": true, "LV-084": true, "LV-085": true, "LV-086": true,
+ "LV-087": true, "LV-088": true, "LV-089": true, "LV-090": true, "LV-091": true,
+ "LV-092": true, "LV-093": true, "LV-094": true, "LV-095": true, "LV-096": true,
+ "LV-097": true, "LV-098": true, "LV-099": true, "LV-100": true, "LV-101": true,
+ "LV-102": true, "LV-103": true, "LV-104": true, "LV-105": true, "LV-106": true,
+ "LV-107": true, "LV-108": true, "LV-109": true, "LV-110": true, "LV-DGV": true,
+ "LV-JEL": true, "LV-JKB": true, "LV-JUR": true, "LV-LPX": true, "LV-REZ": true,
+ "LV-RIX": true, "LV-VEN": true, "LV-VMR": true, "LY-BA": true, "LY-BU": true,
+ "LY-DR": true, "LY-GT": true, "LY-JA": true, "LY-JB": true, "LY-JG": true,
+ "LY-JI": true, "LY-JU": true, "LY-KF": true, "LY-MB": true, "LY-MI": true,
+ "LY-MJ": true, "LY-MQ": true, "LY-NL": true, "LY-NQ": true, "LY-SB": true,
+ "LY-SR": true, "LY-TB": true, "LY-WA": true, "LY-WD": true, "LY-WS": true,
+ "LY-ZA": true, "MA-01": true, "MA-02": true, "MA-03": true, "MA-04": true,
+ "MA-05": true, "MA-06": true, "MA-07": true, "MA-08": true, "MA-09": true,
+ "MA-10": true, "MA-11": true, "MA-12": true, "MA-13": true, "MA-14": true,
+ "MA-15": true, "MA-16": true, "MA-AGD": true, "MA-AOU": true, "MA-ASZ": true,
+ "MA-AZI": true, "MA-BEM": true, "MA-BER": true, "MA-BES": true, "MA-BOD": true,
+ "MA-BOM": true, "MA-CAS": true, "MA-CHE": true, "MA-CHI": true, "MA-CHT": true,
+ "MA-ERR": true, "MA-ESI": true, "MA-ESM": true, "MA-FAH": true, "MA-FES": true,
+ "MA-FIG": true, "MA-GUE": true, "MA-HAJ": true, "MA-HAO": true, "MA-HOC": true,
+ "MA-IFR": true, "MA-INE": true, "MA-JDI": true, "MA-JRA": true, "MA-KEN": true,
+ "MA-KES": true, "MA-KHE": true, "MA-KHN": true, "MA-KHO": true, "MA-LAA": true,
+ "MA-LAR": true, "MA-MED": true, "MA-MEK": true, "MA-MMD": true, "MA-MMN": true,
+ "MA-MOH": true, "MA-MOU": true, "MA-NAD": true, "MA-NOU": true, "MA-OUA": true,
+ "MA-OUD": true, "MA-OUJ": true, "MA-RAB": true, "MA-SAF": true, "MA-SAL": true,
+ "MA-SEF": true, "MA-SET": true, "MA-SIK": true, "MA-SKH": true, "MA-SYB": true,
+ "MA-TAI": true, "MA-TAO": true, "MA-TAR": true, "MA-TAT": true, "MA-TAZ": true,
+ "MA-TET": true, "MA-TIZ": true, "MA-TNG": true, "MA-TNT": true, "MA-ZAG": true,
+ "MC-CL": true, "MC-CO": true, "MC-FO": true, "MC-GA": true, "MC-JE": true,
+ "MC-LA": true, "MC-MA": true, "MC-MC": true, "MC-MG": true, "MC-MO": true,
+ "MC-MU": true, "MC-PH": true, "MC-SD": true, "MC-SO": true, "MC-SP": true,
+ "MC-SR": true, "MC-VR": true, "MD-AN": true, "MD-BA": true, "MD-BD": true,
+ "MD-BR": true, "MD-BS": true, "MD-CA": true, "MD-CL": true, "MD-CM": true,
+ "MD-CR": true, "MD-CS": true, "MD-CT": true, "MD-CU": true, "MD-DO": true,
+ "MD-DR": true, "MD-DU": true, "MD-ED": true, "MD-FA": true, "MD-FL": true,
+ "MD-GA": true, "MD-GL": true, "MD-HI": true, "MD-IA": true, "MD-LE": true,
+ "MD-NI": true, "MD-OC": true, "MD-OR": true, "MD-RE": true, "MD-RI": true,
+ "MD-SD": true, "MD-SI": true, "MD-SN": true, "MD-SO": true, "MD-ST": true,
+ "MD-SV": true, "MD-TA": true, "MD-TE": true, "MD-UN": true, "ME-01": true,
+ "ME-02": true, "ME-03": true, "ME-04": true, "ME-05": true, "ME-06": true,
+ "ME-07": true, "ME-08": true, "ME-09": true, "ME-10": true, "ME-11": true,
+ "ME-12": true, "ME-13": true, "ME-14": true, "ME-15": true, "ME-16": true,
+ "ME-17": true, "ME-18": true, "ME-19": true, "ME-20": true, "ME-21": true, "ME-24": true,
+ "MG-A": true, "MG-D": true, "MG-F": true, "MG-M": true, "MG-T": true,
+ "MG-U": true, "MH-ALK": true, "MH-ALL": true, "MH-ARN": true, "MH-AUR": true,
+ "MH-EBO": true, "MH-ENI": true, "MH-JAB": true, "MH-JAL": true, "MH-KIL": true,
+ "MH-KWA": true, "MH-L": true, "MH-LAE": true, "MH-LIB": true, "MH-LIK": true,
+ "MH-MAJ": true, "MH-MAL": true, "MH-MEJ": true, "MH-MIL": true, "MH-NMK": true,
+ "MH-NMU": true, "MH-RON": true, "MH-T": true, "MH-UJA": true, "MH-UTI": true,
+ "MH-WTJ": true, "MH-WTN": true, "MK-101": true, "MK-102": true, "MK-103": true,
+ "MK-104": true, "MK-105": true,
+ "MK-106": true, "MK-107": true, "MK-108": true, "MK-109": true, "MK-201": true,
+ "MK-202": true, "MK-205": true, "MK-206": true, "MK-207": true, "MK-208": true,
+ "MK-209": true, "MK-210": true, "MK-211": true, "MK-301": true, "MK-303": true,
+ "MK-307": true, "MK-308": true, "MK-310": true, "MK-311": true, "MK-312": true,
+ "MK-401": true, "MK-402": true, "MK-403": true, "MK-404": true, "MK-405": true,
+ "MK-406": true, "MK-408": true, "MK-409": true, "MK-410": true, "MK-501": true,
+ "MK-502": true, "MK-503": true, "MK-505": true, "MK-506": true, "MK-507": true,
+ "MK-508": true, "MK-509": true, "MK-601": true, "MK-602": true, "MK-604": true,
+ "MK-605": true, "MK-606": true, "MK-607": true, "MK-608": true, "MK-609": true,
+ "MK-701": true, "MK-702": true, "MK-703": true, "MK-704": true, "MK-705": true,
+ "MK-803": true, "MK-804": true, "MK-806": true, "MK-807": true, "MK-809": true,
+ "MK-810": true, "MK-811": true, "MK-812": true, "MK-813": true, "MK-814": true,
+ "MK-816": true, "ML-1": true, "ML-2": true, "ML-3": true, "ML-4": true,
+ "ML-5": true, "ML-6": true, "ML-7": true, "ML-8": true, "ML-BKO": true,
+ "MM-01": true, "MM-02": true, "MM-03": true, "MM-04": true, "MM-05": true,
+ "MM-06": true, "MM-07": true, "MM-11": true, "MM-12": true, "MM-13": true,
+ "MM-14": true, "MM-15": true, "MM-16": true, "MM-17": true, "MM-18": true, "MN-035": true,
+ "MN-037": true, "MN-039": true, "MN-041": true, "MN-043": true, "MN-046": true,
+ "MN-047": true, "MN-049": true, "MN-051": true, "MN-053": true, "MN-055": true,
+ "MN-057": true, "MN-059": true, "MN-061": true, "MN-063": true, "MN-064": true,
+ "MN-065": true, "MN-067": true, "MN-069": true, "MN-071": true, "MN-073": true,
+ "MN-1": true, "MR-01": true, "MR-02": true, "MR-03": true, "MR-04": true,
+ "MR-05": true, "MR-06": true, "MR-07": true, "MR-08": true, "MR-09": true,
+ "MR-10": true, "MR-11": true, "MR-12": true, "MR-13": true, "MR-NKC": true, "MT-01": true,
+ "MT-02": true, "MT-03": true, "MT-04": true, "MT-05": true, "MT-06": true,
+ "MT-07": true, "MT-08": true, "MT-09": true, "MT-10": true, "MT-11": true,
+ "MT-12": true, "MT-13": true, "MT-14": true, "MT-15": true, "MT-16": true,
+ "MT-17": true, "MT-18": true, "MT-19": true, "MT-20": true, "MT-21": true,
+ "MT-22": true, "MT-23": true, "MT-24": true, "MT-25": true, "MT-26": true,
+ "MT-27": true, "MT-28": true, "MT-29": true, "MT-30": true, "MT-31": true,
+ "MT-32": true, "MT-33": true, "MT-34": true, "MT-35": true, "MT-36": true,
+ "MT-37": true, "MT-38": true, "MT-39": true, "MT-40": true, "MT-41": true,
+ "MT-42": true, "MT-43": true, "MT-44": true, "MT-45": true, "MT-46": true,
+ "MT-47": true, "MT-48": true, "MT-49": true, "MT-50": true, "MT-51": true,
+ "MT-52": true, "MT-53": true, "MT-54": true, "MT-55": true, "MT-56": true,
+ "MT-57": true, "MT-58": true, "MT-59": true, "MT-60": true, "MT-61": true,
+ "MT-62": true, "MT-63": true, "MT-64": true, "MT-65": true, "MT-66": true,
+ "MT-67": true, "MT-68": true, "MU-AG": true, "MU-BL": true, "MU-BR": true,
+ "MU-CC": true, "MU-CU": true, "MU-FL": true, "MU-GP": true, "MU-MO": true,
+ "MU-PA": true, "MU-PL": true, "MU-PU": true, "MU-PW": true, "MU-QB": true,
+ "MU-RO": true, "MU-RP": true, "MU-RR": true, "MU-SA": true, "MU-VP": true, "MV-00": true,
+ "MV-01": true, "MV-02": true, "MV-03": true, "MV-04": true, "MV-05": true,
+ "MV-07": true, "MV-08": true, "MV-12": true, "MV-13": true, "MV-14": true,
+ "MV-17": true, "MV-20": true, "MV-23": true, "MV-24": true, "MV-25": true,
+ "MV-26": true, "MV-27": true, "MV-28": true, "MV-29": true, "MV-CE": true,
+ "MV-MLE": true, "MV-NC": true, "MV-NO": true, "MV-SC": true, "MV-SU": true,
+ "MV-UN": true, "MV-US": true, "MW-BA": true, "MW-BL": true, "MW-C": true,
+ "MW-CK": true, "MW-CR": true, "MW-CT": true, "MW-DE": true, "MW-DO": true,
+ "MW-KR": true, "MW-KS": true, "MW-LI": true, "MW-LK": true, "MW-MC": true,
+ "MW-MG": true, "MW-MH": true, "MW-MU": true, "MW-MW": true, "MW-MZ": true,
+ "MW-N": true, "MW-NB": true, "MW-NE": true, "MW-NI": true, "MW-NK": true,
+ "MW-NS": true, "MW-NU": true, "MW-PH": true, "MW-RU": true, "MW-S": true,
+ "MW-SA": true, "MW-TH": true, "MW-ZO": true, "MX-AGU": true, "MX-BCN": true,
+ "MX-BCS": true, "MX-CAM": true, "MX-CHH": true, "MX-CHP": true, "MX-COA": true,
+ "MX-COL": true, "MX-CMX": true, "MX-DIF": true, "MX-DUR": true, "MX-GRO": true, "MX-GUA": true,
+ "MX-HID": true, "MX-JAL": true, "MX-MEX": true, "MX-MIC": true, "MX-MOR": true,
+ "MX-NAY": true, "MX-NLE": true, "MX-OAX": true, "MX-PUE": true, "MX-QUE": true,
+ "MX-ROO": true, "MX-SIN": true, "MX-SLP": true, "MX-SON": true, "MX-TAB": true,
+ "MX-TAM": true, "MX-TLA": true, "MX-VER": true, "MX-YUC": true, "MX-ZAC": true,
+ "MY-01": true, "MY-02": true, "MY-03": true, "MY-04": true, "MY-05": true,
+ "MY-06": true, "MY-07": true, "MY-08": true, "MY-09": true, "MY-10": true,
+ "MY-11": true, "MY-12": true, "MY-13": true, "MY-14": true, "MY-15": true,
+ "MY-16": true, "MZ-A": true, "MZ-B": true, "MZ-G": true, "MZ-I": true,
+ "MZ-L": true, "MZ-MPM": true, "MZ-N": true, "MZ-P": true, "MZ-Q": true,
+ "MZ-S": true, "MZ-T": true, "NA-CA": true, "NA-ER": true, "NA-HA": true,
+ "NA-KA": true, "NA-KE": true, "NA-KH": true, "NA-KU": true, "NA-KW": true, "NA-OD": true, "NA-OH": true,
+ "NA-OK": true, "NA-ON": true, "NA-OS": true, "NA-OT": true, "NA-OW": true,
+ "NE-1": true, "NE-2": true, "NE-3": true, "NE-4": true, "NE-5": true,
+ "NE-6": true, "NE-7": true, "NE-8": true, "NG-AB": true, "NG-AD": true,
+ "NG-AK": true, "NG-AN": true, "NG-BA": true, "NG-BE": true, "NG-BO": true,
+ "NG-BY": true, "NG-CR": true, "NG-DE": true, "NG-EB": true, "NG-ED": true,
+ "NG-EK": true, "NG-EN": true, "NG-FC": true, "NG-GO": true, "NG-IM": true,
+ "NG-JI": true, "NG-KD": true, "NG-KE": true, "NG-KN": true, "NG-KO": true,
+ "NG-KT": true, "NG-KW": true, "NG-LA": true, "NG-NA": true, "NG-NI": true,
+ "NG-OG": true, "NG-ON": true, "NG-OS": true, "NG-OY": true, "NG-PL": true,
+ "NG-RI": true, "NG-SO": true, "NG-TA": true, "NG-YO": true, "NG-ZA": true,
+ "NI-AN": true, "NI-AS": true, "NI-BO": true, "NI-CA": true, "NI-CI": true,
+ "NI-CO": true, "NI-ES": true, "NI-GR": true, "NI-JI": true, "NI-LE": true,
+ "NI-MD": true, "NI-MN": true, "NI-MS": true, "NI-MT": true, "NI-NS": true,
+ "NI-RI": true, "NI-SJ": true, "NL-AW": true, "NL-BQ1": true, "NL-BQ2": true,
+ "NL-BQ3": true, "NL-CW": true, "NL-DR": true, "NL-FL": true, "NL-FR": true,
+ "NL-GE": true, "NL-GR": true, "NL-LI": true, "NL-NB": true, "NL-NH": true,
+ "NL-OV": true, "NL-SX": true, "NL-UT": true, "NL-ZE": true, "NL-ZH": true,
+ "NO-03": true, "NO-11": true, "NO-15": true, "NO-16": true, "NO-17": true,
+ "NO-18": true, "NO-21": true, "NO-30": true, "NO-34": true, "NO-38": true,
+ "NO-42": true, "NO-46": true, "NO-50": true, "NO-54": true,
+ "NO-22": true, "NP-1": true, "NP-2": true, "NP-3": true, "NP-4": true,
+ "NP-5": true, "NP-BA": true, "NP-BH": true, "NP-DH": true, "NP-GA": true,
+ "NP-JA": true, "NP-KA": true, "NP-KO": true, "NP-LU": true, "NP-MA": true,
+ "NP-ME": true, "NP-NA": true, "NP-RA": true, "NP-SA": true, "NP-SE": true,
+ "NR-01": true, "NR-02": true, "NR-03": true, "NR-04": true, "NR-05": true,
+ "NR-06": true, "NR-07": true, "NR-08": true, "NR-09": true, "NR-10": true,
+ "NR-11": true, "NR-12": true, "NR-13": true, "NR-14": true, "NZ-AUK": true,
+ "NZ-BOP": true, "NZ-CAN": true, "NZ-CIT": true, "NZ-GIS": true, "NZ-HKB": true,
+ "NZ-MBH": true, "NZ-MWT": true, "NZ-N": true, "NZ-NSN": true, "NZ-NTL": true,
+ "NZ-OTA": true, "NZ-S": true, "NZ-STL": true, "NZ-TAS": true, "NZ-TKI": true,
+ "NZ-WGN": true, "NZ-WKO": true, "NZ-WTC": true, "OM-BA": true, "OM-BS": true, "OM-BU": true, "OM-BJ": true,
+ "OM-DA": true, "OM-MA": true, "OM-MU": true, "OM-SH": true, "OM-SJ": true, "OM-SS": true, "OM-WU": true,
+ "OM-ZA": true, "OM-ZU": true, "PA-1": true, "PA-2": true, "PA-3": true,
+ "PA-4": true, "PA-5": true, "PA-6": true, "PA-7": true, "PA-8": true,
+ "PA-9": true, "PA-EM": true, "PA-KY": true, "PA-NB": true, "PE-AMA": true,
+ "PE-ANC": true, "PE-APU": true, "PE-ARE": true, "PE-AYA": true, "PE-CAJ": true,
+ "PE-CAL": true, "PE-CUS": true, "PE-HUC": true, "PE-HUV": true, "PE-ICA": true,
+ "PE-JUN": true, "PE-LAL": true, "PE-LAM": true, "PE-LIM": true, "PE-LMA": true,
+ "PE-LOR": true, "PE-MDD": true, "PE-MOQ": true, "PE-PAS": true, "PE-PIU": true,
+ "PE-PUN": true, "PE-SAM": true, "PE-TAC": true, "PE-TUM": true, "PE-UCA": true,
+ "PG-CPK": true, "PG-CPM": true, "PG-EBR": true, "PG-EHG": true, "PG-EPW": true,
+ "PG-ESW": true, "PG-GPK": true, "PG-MBA": true, "PG-MPL": true, "PG-MPM": true,
+ "PG-MRL": true, "PG-NCD": true, "PG-NIK": true, "PG-NPP": true, "PG-NSB": true,
+ "PG-SAN": true, "PG-SHM": true, "PG-WBK": true, "PG-WHM": true, "PG-WPD": true,
+ "PH-00": true, "PH-01": true, "PH-02": true, "PH-03": true, "PH-05": true,
+ "PH-06": true, "PH-07": true, "PH-08": true, "PH-09": true, "PH-10": true,
+ "PH-11": true, "PH-12": true, "PH-13": true, "PH-14": true, "PH-15": true,
+ "PH-40": true, "PH-41": true, "PH-ABR": true, "PH-AGN": true, "PH-AGS": true,
+ "PH-AKL": true, "PH-ALB": true, "PH-ANT": true, "PH-APA": true, "PH-AUR": true,
+ "PH-BAN": true, "PH-BAS": true, "PH-BEN": true, "PH-BIL": true, "PH-BOH": true,
+ "PH-BTG": true, "PH-BTN": true, "PH-BUK": true, "PH-BUL": true, "PH-CAG": true,
+ "PH-CAM": true, "PH-CAN": true, "PH-CAP": true, "PH-CAS": true, "PH-CAT": true,
+ "PH-CAV": true, "PH-CEB": true, "PH-COM": true, "PH-DAO": true, "PH-DAS": true,
+ "PH-DAV": true, "PH-DIN": true, "PH-EAS": true, "PH-GUI": true, "PH-IFU": true,
+ "PH-ILI": true, "PH-ILN": true, "PH-ILS": true, "PH-ISA": true, "PH-KAL": true,
+ "PH-LAG": true, "PH-LAN": true, "PH-LAS": true, "PH-LEY": true, "PH-LUN": true,
+ "PH-MAD": true, "PH-MAG": true, "PH-MAS": true, "PH-MDC": true, "PH-MDR": true,
+ "PH-MOU": true, "PH-MSC": true, "PH-MSR": true, "PH-NCO": true, "PH-NEC": true,
+ "PH-NER": true, "PH-NSA": true, "PH-NUE": true, "PH-NUV": true, "PH-PAM": true,
+ "PH-PAN": true, "PH-PLW": true, "PH-QUE": true, "PH-QUI": true, "PH-RIZ": true,
+ "PH-ROM": true, "PH-SAR": true, "PH-SCO": true, "PH-SIG": true, "PH-SLE": true,
+ "PH-SLU": true, "PH-SOR": true, "PH-SUK": true, "PH-SUN": true, "PH-SUR": true,
+ "PH-TAR": true, "PH-TAW": true, "PH-WSA": true, "PH-ZAN": true, "PH-ZAS": true,
+ "PH-ZMB": true, "PH-ZSI": true, "PK-BA": true, "PK-GB": true, "PK-IS": true,
+ "PK-JK": true, "PK-KP": true, "PK-PB": true, "PK-SD": true, "PK-TA": true,
+ "PL-02": true, "PL-04": true, "PL-06": true, "PL-08": true, "PL-10": true,
+ "PL-12": true, "PL-14": true, "PL-16": true, "PL-18": true, "PL-20": true,
+ "PL-22": true, "PL-24": true, "PL-26": true, "PL-28": true, "PL-30": true, "PL-32": true,
+ "PS-BTH": true, "PS-DEB": true, "PS-GZA": true, "PS-HBN": true,
+ "PS-JEM": true, "PS-JEN": true, "PS-JRH": true, "PS-KYS": true, "PS-NBS": true,
+ "PS-NGZ": true, "PS-QQA": true, "PS-RBH": true, "PS-RFH": true, "PS-SLT": true,
+ "PS-TBS": true, "PS-TKM": true, "PT-01": true, "PT-02": true, "PT-03": true,
+ "PT-04": true, "PT-05": true, "PT-06": true, "PT-07": true, "PT-08": true,
+ "PT-09": true, "PT-10": true, "PT-11": true, "PT-12": true, "PT-13": true,
+ "PT-14": true, "PT-15": true, "PT-16": true, "PT-17": true, "PT-18": true,
+ "PT-20": true, "PT-30": true, "PW-002": true, "PW-004": true, "PW-010": true,
+ "PW-050": true, "PW-100": true, "PW-150": true, "PW-212": true, "PW-214": true,
+ "PW-218": true, "PW-222": true, "PW-224": true, "PW-226": true, "PW-227": true,
+ "PW-228": true, "PW-350": true, "PW-370": true, "PY-1": true, "PY-10": true,
+ "PY-11": true, "PY-12": true, "PY-13": true, "PY-14": true, "PY-15": true,
+ "PY-16": true, "PY-19": true, "PY-2": true, "PY-3": true, "PY-4": true,
+ "PY-5": true, "PY-6": true, "PY-7": true, "PY-8": true, "PY-9": true,
+ "PY-ASU": true, "QA-DA": true, "QA-KH": true, "QA-MS": true, "QA-RA": true,
+ "QA-US": true, "QA-WA": true, "QA-ZA": true, "RO-AB": true, "RO-AG": true,
+ "RO-AR": true, "RO-B": true, "RO-BC": true, "RO-BH": true, "RO-BN": true,
+ "RO-BR": true, "RO-BT": true, "RO-BV": true, "RO-BZ": true, "RO-CJ": true,
+ "RO-CL": true, "RO-CS": true, "RO-CT": true, "RO-CV": true, "RO-DB": true,
+ "RO-DJ": true, "RO-GJ": true, "RO-GL": true, "RO-GR": true, "RO-HD": true,
+ "RO-HR": true, "RO-IF": true, "RO-IL": true, "RO-IS": true, "RO-MH": true,
+ "RO-MM": true, "RO-MS": true, "RO-NT": true, "RO-OT": true, "RO-PH": true,
+ "RO-SB": true, "RO-SJ": true, "RO-SM": true, "RO-SV": true, "RO-TL": true,
+ "RO-TM": true, "RO-TR": true, "RO-VL": true, "RO-VN": true, "RO-VS": true,
+ "RS-00": true, "RS-01": true, "RS-02": true, "RS-03": true, "RS-04": true,
+ "RS-05": true, "RS-06": true, "RS-07": true, "RS-08": true, "RS-09": true,
+ "RS-10": true, "RS-11": true, "RS-12": true, "RS-13": true, "RS-14": true,
+ "RS-15": true, "RS-16": true, "RS-17": true, "RS-18": true, "RS-19": true,
+ "RS-20": true, "RS-21": true, "RS-22": true, "RS-23": true, "RS-24": true,
+ "RS-25": true, "RS-26": true, "RS-27": true, "RS-28": true, "RS-29": true,
+ "RS-KM": true, "RS-VO": true, "RU-AD": true, "RU-AL": true, "RU-ALT": true,
+ "RU-AMU": true, "RU-ARK": true, "RU-AST": true, "RU-BA": true, "RU-BEL": true,
+ "RU-BRY": true, "RU-BU": true, "RU-CE": true, "RU-CHE": true, "RU-CHU": true,
+ "RU-CU": true, "RU-DA": true, "RU-IN": true, "RU-IRK": true, "RU-IVA": true,
+ "RU-KAM": true, "RU-KB": true, "RU-KC": true, "RU-KDA": true, "RU-KEM": true,
+ "RU-KGD": true, "RU-KGN": true, "RU-KHA": true, "RU-KHM": true, "RU-KIR": true,
+ "RU-KK": true, "RU-KL": true, "RU-KLU": true, "RU-KO": true, "RU-KOS": true,
+ "RU-KR": true, "RU-KRS": true, "RU-KYA": true, "RU-LEN": true, "RU-LIP": true,
+ "RU-MAG": true, "RU-ME": true, "RU-MO": true, "RU-MOS": true, "RU-MOW": true,
+ "RU-MUR": true, "RU-NEN": true, "RU-NGR": true, "RU-NIZ": true, "RU-NVS": true,
+ "RU-OMS": true, "RU-ORE": true, "RU-ORL": true, "RU-PER": true, "RU-PNZ": true,
+ "RU-PRI": true, "RU-PSK": true, "RU-ROS": true, "RU-RYA": true, "RU-SA": true,
+ "RU-SAK": true, "RU-SAM": true, "RU-SAR": true, "RU-SE": true, "RU-SMO": true,
+ "RU-SPE": true, "RU-STA": true, "RU-SVE": true, "RU-TA": true, "RU-TAM": true,
+ "RU-TOM": true, "RU-TUL": true, "RU-TVE": true, "RU-TY": true, "RU-TYU": true,
+ "RU-UD": true, "RU-ULY": true, "RU-VGG": true, "RU-VLA": true, "RU-VLG": true,
+ "RU-VOR": true, "RU-YAN": true, "RU-YAR": true, "RU-YEV": true, "RU-ZAB": true,
+ "RW-01": true, "RW-02": true, "RW-03": true, "RW-04": true, "RW-05": true,
+ "SA-01": true, "SA-02": true, "SA-03": true, "SA-04": true, "SA-05": true,
+ "SA-06": true, "SA-07": true, "SA-08": true, "SA-09": true, "SA-10": true,
+ "SA-11": true, "SA-12": true, "SA-14": true, "SB-CE": true, "SB-CH": true,
+ "SB-CT": true, "SB-GU": true, "SB-IS": true, "SB-MK": true, "SB-ML": true,
+ "SB-RB": true, "SB-TE": true, "SB-WE": true, "SC-01": true, "SC-02": true,
+ "SC-03": true, "SC-04": true, "SC-05": true, "SC-06": true, "SC-07": true,
+ "SC-08": true, "SC-09": true, "SC-10": true, "SC-11": true, "SC-12": true,
+ "SC-13": true, "SC-14": true, "SC-15": true, "SC-16": true, "SC-17": true,
+ "SC-18": true, "SC-19": true, "SC-20": true, "SC-21": true, "SC-22": true,
+ "SC-23": true, "SC-24": true, "SC-25": true, "SD-DC": true, "SD-DE": true,
+ "SD-DN": true, "SD-DS": true, "SD-DW": true, "SD-GD": true, "SD-GK": true, "SD-GZ": true,
+ "SD-KA": true, "SD-KH": true, "SD-KN": true, "SD-KS": true, "SD-NB": true,
+ "SD-NO": true, "SD-NR": true, "SD-NW": true, "SD-RS": true, "SD-SI": true,
+ "SE-AB": true, "SE-AC": true, "SE-BD": true, "SE-C": true, "SE-D": true,
+ "SE-E": true, "SE-F": true, "SE-G": true, "SE-H": true, "SE-I": true,
+ "SE-K": true, "SE-M": true, "SE-N": true, "SE-O": true, "SE-S": true,
+ "SE-T": true, "SE-U": true, "SE-W": true, "SE-X": true, "SE-Y": true,
+ "SE-Z": true, "SG-01": true, "SG-02": true, "SG-03": true, "SG-04": true,
+ "SG-05": true, "SH-AC": true, "SH-HL": true, "SH-TA": true, "SI-001": true,
+ "SI-002": true, "SI-003": true, "SI-004": true, "SI-005": true, "SI-006": true,
+ "SI-007": true, "SI-008": true, "SI-009": true, "SI-010": true, "SI-011": true,
+ "SI-012": true, "SI-013": true, "SI-014": true, "SI-015": true, "SI-016": true,
+ "SI-017": true, "SI-018": true, "SI-019": true, "SI-020": true, "SI-021": true,
+ "SI-022": true, "SI-023": true, "SI-024": true, "SI-025": true, "SI-026": true,
+ "SI-027": true, "SI-028": true, "SI-029": true, "SI-030": true, "SI-031": true,
+ "SI-032": true, "SI-033": true, "SI-034": true, "SI-035": true, "SI-036": true,
+ "SI-037": true, "SI-038": true, "SI-039": true, "SI-040": true, "SI-041": true,
+ "SI-042": true, "SI-043": true, "SI-044": true, "SI-045": true, "SI-046": true,
+ "SI-047": true, "SI-048": true, "SI-049": true, "SI-050": true, "SI-051": true,
+ "SI-052": true, "SI-053": true, "SI-054": true, "SI-055": true, "SI-056": true,
+ "SI-057": true, "SI-058": true, "SI-059": true, "SI-060": true, "SI-061": true,
+ "SI-062": true, "SI-063": true, "SI-064": true, "SI-065": true, "SI-066": true,
+ "SI-067": true, "SI-068": true, "SI-069": true, "SI-070": true, "SI-071": true,
+ "SI-072": true, "SI-073": true, "SI-074": true, "SI-075": true, "SI-076": true,
+ "SI-077": true, "SI-078": true, "SI-079": true, "SI-080": true, "SI-081": true,
+ "SI-082": true, "SI-083": true, "SI-084": true, "SI-085": true, "SI-086": true,
+ "SI-087": true, "SI-088": true, "SI-089": true, "SI-090": true, "SI-091": true,
+ "SI-092": true, "SI-093": true, "SI-094": true, "SI-095": true, "SI-096": true,
+ "SI-097": true, "SI-098": true, "SI-099": true, "SI-100": true, "SI-101": true,
+ "SI-102": true, "SI-103": true, "SI-104": true, "SI-105": true, "SI-106": true,
+ "SI-107": true, "SI-108": true, "SI-109": true, "SI-110": true, "SI-111": true,
+ "SI-112": true, "SI-113": true, "SI-114": true, "SI-115": true, "SI-116": true,
+ "SI-117": true, "SI-118": true, "SI-119": true, "SI-120": true, "SI-121": true,
+ "SI-122": true, "SI-123": true, "SI-124": true, "SI-125": true, "SI-126": true,
+ "SI-127": true, "SI-128": true, "SI-129": true, "SI-130": true, "SI-131": true,
+ "SI-132": true, "SI-133": true, "SI-134": true, "SI-135": true, "SI-136": true,
+ "SI-137": true, "SI-138": true, "SI-139": true, "SI-140": true, "SI-141": true,
+ "SI-142": true, "SI-143": true, "SI-144": true, "SI-146": true, "SI-147": true,
+ "SI-148": true, "SI-149": true, "SI-150": true, "SI-151": true, "SI-152": true,
+ "SI-153": true, "SI-154": true, "SI-155": true, "SI-156": true, "SI-157": true,
+ "SI-158": true, "SI-159": true, "SI-160": true, "SI-161": true, "SI-162": true,
+ "SI-163": true, "SI-164": true, "SI-165": true, "SI-166": true, "SI-167": true,
+ "SI-168": true, "SI-169": true, "SI-170": true, "SI-171": true, "SI-172": true,
+ "SI-173": true, "SI-174": true, "SI-175": true, "SI-176": true, "SI-177": true,
+ "SI-178": true, "SI-179": true, "SI-180": true, "SI-181": true, "SI-182": true,
+ "SI-183": true, "SI-184": true, "SI-185": true, "SI-186": true, "SI-187": true,
+ "SI-188": true, "SI-189": true, "SI-190": true, "SI-191": true, "SI-192": true,
+ "SI-193": true, "SI-194": true, "SI-195": true, "SI-196": true, "SI-197": true,
+ "SI-198": true, "SI-199": true, "SI-200": true, "SI-201": true, "SI-202": true,
+ "SI-203": true, "SI-204": true, "SI-205": true, "SI-206": true, "SI-207": true,
+ "SI-208": true, "SI-209": true, "SI-210": true, "SI-211": true, "SI-212": true, "SI-213": true, "SK-BC": true,
+ "SK-BL": true, "SK-KI": true, "SK-NI": true, "SK-PV": true, "SK-TA": true,
+ "SK-TC": true, "SK-ZI": true, "SL-E": true, "SL-N": true, "SL-S": true,
+ "SL-W": true, "SM-01": true, "SM-02": true, "SM-03": true, "SM-04": true,
+ "SM-05": true, "SM-06": true, "SM-07": true, "SM-08": true, "SM-09": true,
+ "SN-DB": true, "SN-DK": true, "SN-FK": true, "SN-KA": true, "SN-KD": true,
+ "SN-KE": true, "SN-KL": true, "SN-LG": true, "SN-MT": true, "SN-SE": true,
+ "SN-SL": true, "SN-TC": true, "SN-TH": true, "SN-ZG": true, "SO-AW": true,
+ "SO-BK": true, "SO-BN": true, "SO-BR": true, "SO-BY": true, "SO-GA": true,
+ "SO-GE": true, "SO-HI": true, "SO-JD": true, "SO-JH": true, "SO-MU": true,
+ "SO-NU": true, "SO-SA": true, "SO-SD": true, "SO-SH": true, "SO-SO": true,
+ "SO-TO": true, "SO-WO": true, "SR-BR": true, "SR-CM": true, "SR-CR": true,
+ "SR-MA": true, "SR-NI": true, "SR-PM": true, "SR-PR": true, "SR-SA": true,
+ "SR-SI": true, "SR-WA": true, "SS-BN": true, "SS-BW": true, "SS-EC": true,
+ "SS-EE8": true, "SS-EE": true, "SS-EW": true, "SS-JG": true, "SS-LK": true, "SS-NU": true,
+ "SS-UY": true, "SS-WR": true, "ST-01": true, "ST-P": true, "ST-S": true, "SV-AH": true,
+ "SV-CA": true, "SV-CH": true, "SV-CU": true, "SV-LI": true, "SV-MO": true,
+ "SV-PA": true, "SV-SA": true, "SV-SM": true, "SV-SO": true, "SV-SS": true,
+ "SV-SV": true, "SV-UN": true, "SV-US": true, "SY-DI": true, "SY-DR": true,
+ "SY-DY": true, "SY-HA": true, "SY-HI": true, "SY-HL": true, "SY-HM": true,
+ "SY-ID": true, "SY-LA": true, "SY-QU": true, "SY-RA": true, "SY-RD": true,
+ "SY-SU": true, "SY-TA": true, "SZ-HH": true, "SZ-LU": true, "SZ-MA": true,
+ "SZ-SH": true, "TD-BA": true, "TD-BG": true, "TD-BO": true, "TD-CB": true,
+ "TD-EN": true, "TD-GR": true, "TD-HL": true, "TD-KA": true, "TD-LC": true,
+ "TD-LO": true, "TD-LR": true, "TD-MA": true, "TD-MC": true, "TD-ME": true,
+ "TD-MO": true, "TD-ND": true, "TD-OD": true, "TD-SA": true, "TD-SI": true,
+ "TD-TA": true, "TD-TI": true, "TD-WF": true, "TG-C": true, "TG-K": true,
+ "TG-M": true, "TG-P": true, "TG-S": true, "TH-10": true, "TH-11": true,
+ "TH-12": true, "TH-13": true, "TH-14": true, "TH-15": true, "TH-16": true,
+ "TH-17": true, "TH-18": true, "TH-19": true, "TH-20": true, "TH-21": true,
+ "TH-22": true, "TH-23": true, "TH-24": true, "TH-25": true, "TH-26": true,
+ "TH-27": true, "TH-30": true, "TH-31": true, "TH-32": true, "TH-33": true,
+ "TH-34": true, "TH-35": true, "TH-36": true, "TH-37": true, "TH-38": true, "TH-39": true,
+ "TH-40": true, "TH-41": true, "TH-42": true, "TH-43": true, "TH-44": true,
+ "TH-45": true, "TH-46": true, "TH-47": true, "TH-48": true, "TH-49": true,
+ "TH-50": true, "TH-51": true, "TH-52": true, "TH-53": true, "TH-54": true,
+ "TH-55": true, "TH-56": true, "TH-57": true, "TH-58": true, "TH-60": true,
+ "TH-61": true, "TH-62": true, "TH-63": true, "TH-64": true, "TH-65": true,
+ "TH-66": true, "TH-67": true, "TH-70": true, "TH-71": true, "TH-72": true,
+ "TH-73": true, "TH-74": true, "TH-75": true, "TH-76": true, "TH-77": true,
+ "TH-80": true, "TH-81": true, "TH-82": true, "TH-83": true, "TH-84": true,
+ "TH-85": true, "TH-86": true, "TH-90": true, "TH-91": true, "TH-92": true,
+ "TH-93": true, "TH-94": true, "TH-95": true, "TH-96": true, "TH-S": true,
+ "TJ-GB": true, "TJ-KT": true, "TJ-SU": true, "TJ-DU": true, "TJ-RA": true, "TL-AL": true, "TL-AN": true,
+ "TL-BA": true, "TL-BO": true, "TL-CO": true, "TL-DI": true, "TL-ER": true,
+ "TL-LA": true, "TL-LI": true, "TL-MF": true, "TL-MT": true, "TL-OE": true,
+ "TL-VI": true, "TM-A": true, "TM-B": true, "TM-D": true, "TM-L": true,
+ "TM-M": true, "TM-S": true, "TN-11": true, "TN-12": true, "TN-13": true,
+ "TN-14": true, "TN-21": true, "TN-22": true, "TN-23": true, "TN-31": true,
+ "TN-32": true, "TN-33": true, "TN-34": true, "TN-41": true, "TN-42": true,
+ "TN-43": true, "TN-51": true, "TN-52": true, "TN-53": true, "TN-61": true,
+ "TN-71": true, "TN-72": true, "TN-73": true, "TN-81": true, "TN-82": true,
+ "TN-83": true, "TO-01": true, "TO-02": true, "TO-03": true, "TO-04": true,
+ "TO-05": true, "TR-01": true, "TR-02": true, "TR-03": true, "TR-04": true,
+ "TR-05": true, "TR-06": true, "TR-07": true, "TR-08": true, "TR-09": true,
+ "TR-10": true, "TR-11": true, "TR-12": true, "TR-13": true, "TR-14": true,
+ "TR-15": true, "TR-16": true, "TR-17": true, "TR-18": true, "TR-19": true,
+ "TR-20": true, "TR-21": true, "TR-22": true, "TR-23": true, "TR-24": true,
+ "TR-25": true, "TR-26": true, "TR-27": true, "TR-28": true, "TR-29": true,
+ "TR-30": true, "TR-31": true, "TR-32": true, "TR-33": true, "TR-34": true,
+ "TR-35": true, "TR-36": true, "TR-37": true, "TR-38": true, "TR-39": true,
+ "TR-40": true, "TR-41": true, "TR-42": true, "TR-43": true, "TR-44": true,
+ "TR-45": true, "TR-46": true, "TR-47": true, "TR-48": true, "TR-49": true,
+ "TR-50": true, "TR-51": true, "TR-52": true, "TR-53": true, "TR-54": true,
+ "TR-55": true, "TR-56": true, "TR-57": true, "TR-58": true, "TR-59": true,
+ "TR-60": true, "TR-61": true, "TR-62": true, "TR-63": true, "TR-64": true,
+ "TR-65": true, "TR-66": true, "TR-67": true, "TR-68": true, "TR-69": true,
+ "TR-70": true, "TR-71": true, "TR-72": true, "TR-73": true, "TR-74": true,
+ "TR-75": true, "TR-76": true, "TR-77": true, "TR-78": true, "TR-79": true,
+ "TR-80": true, "TR-81": true, "TT-ARI": true, "TT-CHA": true, "TT-CTT": true,
+ "TT-DMN": true, "TT-ETO": true, "TT-MRC": true, "TT-TOB": true, "TT-PED": true, "TT-POS": true, "TT-PRT": true,
+ "TT-PTF": true, "TT-RCM": true, "TT-SFO": true, "TT-SGE": true, "TT-SIP": true,
+ "TT-SJL": true, "TT-TUP": true, "TT-WTO": true, "TV-FUN": true, "TV-NIT": true,
+ "TV-NKF": true, "TV-NKL": true, "TV-NMA": true, "TV-NMG": true, "TV-NUI": true,
+ "TV-VAI": true, "TW-CHA": true, "TW-CYI": true, "TW-CYQ": true, "TW-KIN": true, "TW-HSQ": true,
+ "TW-HSZ": true, "TW-HUA": true, "TW-LIE": true, "TW-ILA": true, "TW-KEE": true, "TW-KHH": true,
+ "TW-KHQ": true, "TW-MIA": true, "TW-NAN": true, "TW-NWT": true, "TW-PEN": true, "TW-PIF": true,
+ "TW-TAO": true, "TW-TNN": true, "TW-TNQ": true, "TW-TPE": true, "TW-TPQ": true,
+ "TW-TTT": true, "TW-TXG": true, "TW-TXQ": true, "TW-YUN": true, "TZ-01": true,
+ "TZ-02": true, "TZ-03": true, "TZ-04": true, "TZ-05": true, "TZ-06": true,
+ "TZ-07": true, "TZ-08": true, "TZ-09": true, "TZ-10": true, "TZ-11": true,
+ "TZ-12": true, "TZ-13": true, "TZ-14": true, "TZ-15": true, "TZ-16": true,
+ "TZ-17": true, "TZ-18": true, "TZ-19": true, "TZ-20": true, "TZ-21": true,
+ "TZ-22": true, "TZ-23": true, "TZ-24": true, "TZ-25": true, "TZ-26": true, "TZ-27": true, "TZ-28": true, "TZ-29": true, "TZ-30": true, "TZ-31": true,
+ "UA-05": true, "UA-07": true, "UA-09": true, "UA-12": true, "UA-14": true,
+ "UA-18": true, "UA-21": true, "UA-23": true, "UA-26": true, "UA-30": true,
+ "UA-32": true, "UA-35": true, "UA-40": true, "UA-43": true, "UA-46": true,
+ "UA-48": true, "UA-51": true, "UA-53": true, "UA-56": true, "UA-59": true,
+ "UA-61": true, "UA-63": true, "UA-65": true, "UA-68": true, "UA-71": true,
+ "UA-74": true, "UA-77": true, "UG-101": true, "UG-102": true, "UG-103": true,
+ "UG-104": true, "UG-105": true, "UG-106": true, "UG-107": true, "UG-108": true,
+ "UG-109": true, "UG-110": true, "UG-111": true, "UG-112": true, "UG-113": true,
+ "UG-114": true, "UG-115": true, "UG-116": true, "UG-201": true, "UG-202": true,
+ "UG-203": true, "UG-204": true, "UG-205": true, "UG-206": true, "UG-207": true,
+ "UG-208": true, "UG-209": true, "UG-210": true, "UG-211": true, "UG-212": true,
+ "UG-213": true, "UG-214": true, "UG-215": true, "UG-216": true, "UG-217": true,
+ "UG-218": true, "UG-219": true, "UG-220": true, "UG-221": true, "UG-222": true,
+ "UG-223": true, "UG-224": true, "UG-301": true, "UG-302": true, "UG-303": true,
+ "UG-304": true, "UG-305": true, "UG-306": true, "UG-307": true, "UG-308": true,
+ "UG-309": true, "UG-310": true, "UG-311": true, "UG-312": true, "UG-313": true,
+ "UG-314": true, "UG-315": true, "UG-316": true, "UG-317": true, "UG-318": true,
+ "UG-319": true, "UG-320": true, "UG-321": true, "UG-401": true, "UG-402": true,
+ "UG-403": true, "UG-404": true, "UG-405": true, "UG-406": true, "UG-407": true,
+ "UG-408": true, "UG-409": true, "UG-410": true, "UG-411": true, "UG-412": true,
+ "UG-413": true, "UG-414": true, "UG-415": true, "UG-416": true, "UG-417": true,
+ "UG-418": true, "UG-419": true, "UG-C": true, "UG-E": true, "UG-N": true,
+ "UG-W": true, "UG-322": true, "UG-323": true, "UG-420": true, "UG-117": true,
+ "UG-118": true, "UG-225": true, "UG-120": true, "UG-226": true,
+ "UG-121": true, "UG-122": true, "UG-227": true, "UG-421": true,
+ "UG-325": true, "UG-228": true, "UG-123": true, "UG-422": true,
+ "UG-326": true, "UG-229": true, "UG-124": true, "UG-423": true,
+ "UG-230": true, "UG-327": true, "UG-424": true, "UG-328": true,
+ "UG-425": true, "UG-426": true, "UG-330": true,
+ "UM-67": true, "UM-71": true, "UM-76": true, "UM-79": true,
+ "UM-81": true, "UM-84": true, "UM-86": true, "UM-89": true, "UM-95": true,
+ "US-AK": true, "US-AL": true, "US-AR": true, "US-AS": true, "US-AZ": true,
+ "US-CA": true, "US-CO": true, "US-CT": true, "US-DC": true, "US-DE": true,
+ "US-FL": true, "US-GA": true, "US-GU": true, "US-HI": true, "US-IA": true,
+ "US-ID": true, "US-IL": true, "US-IN": true, "US-KS": true, "US-KY": true,
+ "US-LA": true, "US-MA": true, "US-MD": true, "US-ME": true, "US-MI": true,
+ "US-MN": true, "US-MO": true, "US-MP": true, "US-MS": true, "US-MT": true,
+ "US-NC": true, "US-ND": true, "US-NE": true, "US-NH": true, "US-NJ": true,
+ "US-NM": true, "US-NV": true, "US-NY": true, "US-OH": true, "US-OK": true,
+ "US-OR": true, "US-PA": true, "US-PR": true, "US-RI": true, "US-SC": true,
+ "US-SD": true, "US-TN": true, "US-TX": true, "US-UM": true, "US-UT": true,
+ "US-VA": true, "US-VI": true, "US-VT": true, "US-WA": true, "US-WI": true,
+ "US-WV": true, "US-WY": true, "UY-AR": true, "UY-CA": true, "UY-CL": true,
+ "UY-CO": true, "UY-DU": true, "UY-FD": true, "UY-FS": true, "UY-LA": true,
+ "UY-MA": true, "UY-MO": true, "UY-PA": true, "UY-RN": true, "UY-RO": true,
+ "UY-RV": true, "UY-SA": true, "UY-SJ": true, "UY-SO": true, "UY-TA": true,
+ "UY-TT": true, "UZ-AN": true, "UZ-BU": true, "UZ-FA": true, "UZ-JI": true,
+ "UZ-NG": true, "UZ-NW": true, "UZ-QA": true, "UZ-QR": true, "UZ-SA": true,
+ "UZ-SI": true, "UZ-SU": true, "UZ-TK": true, "UZ-TO": true, "UZ-XO": true,
+ "VC-01": true, "VC-02": true, "VC-03": true, "VC-04": true, "VC-05": true,
+ "VC-06": true, "VE-A": true, "VE-B": true, "VE-C": true, "VE-D": true,
+ "VE-E": true, "VE-F": true, "VE-G": true, "VE-H": true, "VE-I": true,
+ "VE-J": true, "VE-K": true, "VE-L": true, "VE-M": true, "VE-N": true,
+ "VE-O": true, "VE-P": true, "VE-R": true, "VE-S": true, "VE-T": true,
+ "VE-U": true, "VE-V": true, "VE-W": true, "VE-X": true, "VE-Y": true,
+ "VE-Z": true, "VN-01": true, "VN-02": true, "VN-03": true, "VN-04": true,
+ "VN-05": true, "VN-06": true, "VN-07": true, "VN-09": true, "VN-13": true,
+ "VN-14": true, "VN-15": true, "VN-18": true, "VN-20": true, "VN-21": true,
+ "VN-22": true, "VN-23": true, "VN-24": true, "VN-25": true, "VN-26": true,
+ "VN-27": true, "VN-28": true, "VN-29": true, "VN-30": true, "VN-31": true,
+ "VN-32": true, "VN-33": true, "VN-34": true, "VN-35": true, "VN-36": true,
+ "VN-37": true, "VN-39": true, "VN-40": true, "VN-41": true, "VN-43": true,
+ "VN-44": true, "VN-45": true, "VN-46": true, "VN-47": true, "VN-49": true,
+ "VN-50": true, "VN-51": true, "VN-52": true, "VN-53": true, "VN-54": true,
+ "VN-55": true, "VN-56": true, "VN-57": true, "VN-58": true, "VN-59": true,
+ "VN-61": true, "VN-63": true, "VN-66": true, "VN-67": true, "VN-68": true,
+ "VN-69": true, "VN-70": true, "VN-71": true, "VN-72": true, "VN-73": true,
+ "VN-CT": true, "VN-DN": true, "VN-HN": true, "VN-HP": true, "VN-SG": true,
+ "VU-MAP": true, "VU-PAM": true, "VU-SAM": true, "VU-SEE": true, "VU-TAE": true,
+ "VU-TOB": true, "WF-SG": true,"WF-UV": true, "WS-AA": true, "WS-AL": true, "WS-AT": true, "WS-FA": true,
+ "WS-GE": true, "WS-GI": true, "WS-PA": true, "WS-SA": true, "WS-TU": true,
+ "WS-VF": true, "WS-VS": true, "YE-AB": true, "YE-AD": true, "YE-AM": true,
+ "YE-BA": true, "YE-DA": true, "YE-DH": true, "YE-HD": true, "YE-HJ": true, "YE-HU": true,
+ "YE-IB": true, "YE-JA": true, "YE-LA": true, "YE-MA": true, "YE-MR": true,
+ "YE-MU": true, "YE-MW": true, "YE-RA": true, "YE-SA": true, "YE-SD": true, "YE-SH": true,
+ "YE-SN": true, "YE-TA": true, "ZA-EC": true, "ZA-FS": true, "ZA-GP": true,
+ "ZA-LP": true, "ZA-MP": true, "ZA-NC": true, "ZA-NW": true, "ZA-WC": true,
+ "ZA-ZN": true, "ZA-KZN": true, "ZM-01": true, "ZM-02": true, "ZM-03": true, "ZM-04": true,
+ "ZM-05": true, "ZM-06": true, "ZM-07": true, "ZM-08": true, "ZM-09": true, "ZM-10": true,
+ "ZW-BU": true, "ZW-HA": true, "ZW-MA": true, "ZW-MC": true, "ZW-ME": true,
+ "ZW-MI": true, "ZW-MN": true, "ZW-MS": true, "ZW-MV": true, "ZW-MW": true,
}
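The table that closes above is a plain set keyed by ISO 3166-2 subdivision code, so a validator only needs a constant-time map lookup to accept or reject a value. A minimal sketch of that lookup, using a hypothetical excerpt of the table (the variable and function names below are illustrative, not the library's internal identifiers):

package main

import "fmt"

// iso3166_2Excerpt is a tiny illustrative slice of the full table above.
var iso3166_2Excerpt = map[string]bool{
	"US-CA": true, "FR-IDF": true, "JP-13": true, "GB-ENG": true,
}

// isSubdivisionCode reports whether code is a known ISO 3166-2 entry.
func isSubdivisionCode(code string) bool {
	return iso3166_2Excerpt[code]
}

func main() {
	fmt.Println(isSubdivisionCode("US-CA")) // true
	fmt.Println(isSubdivisionCode("US-XX")) // false
}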
diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go
index 41784620db..f31a7d538a 100644
--- a/vendor/github.com/go-playground/validator/v10/doc.go
+++ b/vendor/github.com/go-playground/validator/v10/doc.go
@@ -146,7 +146,7 @@ use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
so the above will become excludesall=0x7C
type Test struct {
- Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
+ Field `validate:"excludesall=|"` // BAD! Do not include a pipe!
Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
}
@@ -239,7 +239,7 @@ Example #2
map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
// gt=0 will be applied to the map itself
- // eq=1|eq=2 will be applied to each array element in the the map keys
+ // eq=1|eq=2 will be applied to each array element in the map keys
// required will be applied to map values
# Required
@@ -916,7 +916,7 @@ this with the omitempty tag.
# Base64URL String
This validates that a string value contains a valid base64 URL safe value
-according the the RFC4648 spec.
+according the RFC4648 spec.
Although an empty string is a valid base64 URL safe value, this will report
an empty string as an error, if you wish to accept an empty string as valid
you can use this with the omitempty tag.
@@ -927,7 +927,7 @@ you can use this with the omitempty tag.
# Base64RawURL String
This validates that a string value contains a valid base64 URL safe value,
-but without = padding, according the the RFC4648 spec, section 3.2.
+but without = padding, according the RFC4648 spec, section 3.2.
Although an empty string is a valid base64 URL safe value, this will report
an empty string as an error, if you wish to accept an empty string as valid
you can use this with the omitempty tag.
@@ -1361,7 +1361,7 @@ More information on https://cve.mitre.org/
# Credit Card
-This validates that a string value contains a valid credit card number using Luhn algoritm.
+This validates that a string value contains a valid credit card number using Luhn algorithm.
Usage: credit_card
@@ -1372,8 +1372,7 @@ This validates that a string value contains a valid credit card number using Luh
This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm.
-
-#MongoDb ObjectID
+# MongoDb ObjectID
This validates that a string is a valid 24 character hexadecimal string.
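The doc.go hunks above adjust the descriptions of several string validators (the URL-safe base64 checks, the Luhn credit-card check, and the MongoDB ObjectID check). A short usage sketch, assuming the tag names match the documentation being corrected; the struct, field names, and sample values are illustrative only:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Payment struct {
	Card    string `validate:"credit_card"`         // Luhn-checked card number
	TraceID string `validate:"mongodb"`             // 24-character hex ObjectID
	Payload string `validate:"omitempty,base64url"` // RFC 4648 URL-safe base64, optional
}

func main() {
	v := validator.New()
	err := v.Struct(Payment{
		Card:    "4242424242424242",
		TraceID: "507f1f77bcf86cd799439011",
		Payload: "",
	})
	fmt.Println(err) // <nil> when every field passes
}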
diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go
index c0d89cfbed..271328f710 100644
--- a/vendor/github.com/go-playground/validator/v10/struct_level.go
+++ b/vendor/github.com/go-playground/validator/v10/struct_level.go
@@ -62,7 +62,7 @@ type StructLevel interface {
// existing namespace that validator is on.
// e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
// on the nesting. most of the time they will be blank, unless you validate
- // at a level lower the the current field depth
+ // at a level lower the current field depth
ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
}
@@ -74,7 +74,7 @@ var _ StructLevel = new(validate)
// if not is a nested struct.
//
// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an acurate value otherwise.
+// should not be relied upon for an accurate value otherwise.
func (v *validate) Top() reflect.Value {
return v.top
}
@@ -85,7 +85,7 @@ func (v *validate) Top() reflect.Value {
// if not is a nested struct.
//
// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an acurate value otherwise.
+// should not be relied upon for an accurate value otherwise.
func (v *validate) Parent() reflect.Value {
return v.slflParent
}
diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go
index 36da85514e..3925cfe1cd 100644
--- a/vendor/github.com/go-playground/validator/v10/util.go
+++ b/vendor/github.com/go-playground/validator/v10/util.go
@@ -234,7 +234,7 @@ func asInt(param string) int64 {
func asIntFromTimeDuration(param string) int64 {
d, err := time.ParseDuration(param)
if err != nil {
- // attempt parsing as an an integer assuming nanosecond precision
+ // attempt parsing as an integer assuming nanosecond precision
return asInt(param)
}
return int64(d)
diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go
index 51ae1aa04f..d2ee8fe38b 100644
--- a/vendor/github.com/go-playground/validator/v10/validator_instance.go
+++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go
@@ -29,6 +29,7 @@ const (
requiredWithAllTag = "required_with_all"
requiredIfTag = "required_if"
requiredUnlessTag = "required_unless"
+ skipUnlessTag = "skip_unless"
excludedWithoutAllTag = "excluded_without_all"
excludedWithoutTag = "excluded_without"
excludedWithTag = "excluded_with"
@@ -57,7 +58,7 @@ var (
// FilterFunc is the type used to filter fields using
// StructFiltered(...) function.
-// returning true results in the field being filtered/skiped from
+// returning true results in the field being filtered/skipped from
// validation
type FilterFunc func(ns []byte) bool
@@ -123,7 +124,8 @@ func New() *Validate {
switch k {
// these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag,
- excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag:
+ excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag,
+ skipUnlessTag:
_ = v.registerValidation(k, wrapFunc(val), true, true)
default:
// no need to error check here, baked in will always be valid
@@ -151,7 +153,7 @@ func (v *Validate) SetTagName(name string) {
}
// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
-// validation validation information via context.Context.
+// validation information via context.Context.
func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
errs := make(map[string]interface{})
for field, rule := range rules {
@@ -451,7 +453,7 @@ func (v *Validate) StructPartial(s interface{}, fields ...string) error {
}
// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
-// validation validation information via context.Context
+// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
@@ -541,7 +543,7 @@ func (v *Validate) StructExcept(s interface{}, fields ...string) error {
}
// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
-// validation validation information via context.Context
+// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
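The skip_unless tag registered above is wrapped like the required_*/excluded_* tags so it still runs when the value is nil. A hedged sketch of how it might be used, assuming it takes field/value pairs the same way required_unless does; the struct below is hypothetical:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Notification struct {
	Channel string `validate:"required,oneof=email sms"`
	// Assumption: skip_unless skips the remaining rules unless Channel == "email".
	Email string `validate:"skip_unless=Channel email,required,email"`
}

func main() {
	v := validator.New()
	// Channel is "sms", so the Email rules are skipped and no error is reported.
	fmt.Println(v.Struct(Notification{Channel: "sms"}))
}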
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
index c4a2e693e3..c049c1ef48 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -15,6 +15,7 @@
package name
import (
+ // nolint: depguard
_ "crypto/sha256" // Recommended by go-digest.
"strings"
diff --git a/vendor/github.com/google/trillian/client/admin.go b/vendor/github.com/google/trillian/client/admin.go
new file mode 100644
index 0000000000..e48416e5ef
--- /dev/null
+++ b/vendor/github.com/google/trillian/client/admin.go
@@ -0,0 +1,124 @@
+// Copyright 2018 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/google/trillian"
+ "github.com/google/trillian/client/backoff"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "k8s.io/klog/v2"
+)
+
+// CreateAndInitTree uses the adminClient and logClient to create the tree
+// described by req.
+// If req describes a LOG tree, then this function will also call the InitLog
+// function using logClient.
+// Internally, the function will continue to retry failed requests until either
+// the tree is created (and if necessary, initialised) successfully, or ctx is
+// cancelled.
+func CreateAndInitTree(
+ ctx context.Context,
+ req *trillian.CreateTreeRequest,
+ adminClient trillian.TrillianAdminClient,
+ logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
+ b := &backoff.Backoff{
+ Min: 100 * time.Millisecond,
+ Max: 10 * time.Second,
+ Factor: 2,
+ Jitter: true,
+ }
+
+ var tree *trillian.Tree
+ err := b.Retry(ctx, func() error {
+ klog.Info("CreateTree...")
+ var err error
+ tree, err = adminClient.CreateTree(ctx, req)
+ switch code := status.Code(err); code {
+ case codes.Unavailable:
+ klog.Errorf("Admin server unavailable: %v", err)
+ return err
+ case codes.OK:
+ return nil
+ default:
+ klog.Errorf("failed to CreateTree(%+v): %T %v", req, err, err)
+ return err
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ switch tree.TreeType {
+ case trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG:
+ if err := InitLog(ctx, tree, logClient); err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("don't know how or whether to initialise tree type %v", tree.TreeType)
+ }
+
+ return tree, nil
+}
+
+// InitLog initialises a freshly created Log tree.
+func InitLog(ctx context.Context, tree *trillian.Tree, logClient trillian.TrillianLogClient) error {
+ if tree.TreeType != trillian.TreeType_LOG &&
+ tree.TreeType != trillian.TreeType_PREORDERED_LOG {
+ return fmt.Errorf("InitLog called with tree of type %v", tree.TreeType)
+ }
+
+ b := &backoff.Backoff{
+ Min: 100 * time.Millisecond,
+ Max: 10 * time.Second,
+ Factor: 2,
+ Jitter: true,
+ }
+
+ err := b.Retry(ctx, func() error {
+ klog.Infof("Initialising Log %v...", tree.TreeId)
+ req := &trillian.InitLogRequest{LogId: tree.TreeId}
+ resp, err := logClient.InitLog(ctx, req)
+ switch code := status.Code(err); code {
+ case codes.Unavailable:
+ klog.Errorf("Log server unavailable: %v", err)
+ return err
+ case codes.AlreadyExists:
+ klog.Warningf("Bizarrely, the just-created Log (%v) is already initialised!: %v", tree.TreeId, err)
+ return err
+ case codes.OK:
+ klog.Infof("Initialised Log (%v) with new SignedTreeHead:\n%+v",
+ tree.TreeId, resp.Created)
+ return nil
+ default:
+ klog.Errorf("failed to InitLog(%+v): %T %v", req, err, err)
+ return err
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ // Wait for log root to become available.
+ return b.Retry(ctx, func() error {
+ _, err := logClient.GetLatestSignedLogRoot(ctx,
+ &trillian.GetLatestSignedLogRootRequest{LogId: tree.TreeId})
+ return err
+ }, codes.FailedPrecondition)
+}
diff --git a/vendor/github.com/google/trillian/client/backoff/backoff.go b/vendor/github.com/google/trillian/client/backoff/backoff.go
new file mode 100644
index 0000000000..c2b4acb281
--- /dev/null
+++ b/vendor/github.com/google/trillian/client/backoff/backoff.go
@@ -0,0 +1,130 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package backoff allows retrying an operation with backoff.
+package backoff
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// RetriableError explicitly instructs Backoff to retry.
+type RetriableError string
+
+// Error returns string representation of the retriable error.
+func (re RetriableError) Error() string {
+ return string(re)
+}
+
+// RetriableErrorf wraps a formatted string into a RetriableError.
+func RetriableErrorf(format string, a ...interface{}) error {
+ return RetriableError(fmt.Sprintf(format, a...))
+}
+
+// Backoff specifies the parameters of the backoff algorithm. Works correctly
+// if 0 < Min <= Max <= 2^62 (nanosec), and Factor >= 1.
+type Backoff struct {
+ Min time.Duration // Duration of the first pause.
+ Max time.Duration // Max duration of a pause.
+ Factor float64 // The factor of duration increase between iterations.
+ Jitter bool // Add random noise to pauses.
+
+ delta time.Duration // Current pause duration relative to Min, no jitter.
+}
+
+// Duration returns the time to wait on current retry iteration. Every time
+// Duration is called, the returned value will exponentially increase by Factor
+// until Backoff.Max. If Jitter is enabled, will add an additional random value
+// between 0 and the duration, so the result can at most double.
+func (b *Backoff) Duration() time.Duration {
+ base := b.Min + b.delta
+ pause := base
+ if b.Jitter { // Add a number in the range [0, pause).
+ pause += time.Duration(rand.Int63n(int64(pause)))
+ }
+
+ nextPause := time.Duration(float64(base) * b.Factor)
+ if nextPause > b.Max || nextPause < b.Min { // Multiplication could overflow.
+ nextPause = b.Max
+ }
+ b.delta = nextPause - b.Min
+
+ return pause
+}
+
+// Reset sets the internal state back to first retry iteration.
+func (b *Backoff) Reset() {
+ b.delta = 0
+}
+
+// Retry calls a function until it succeeds or the context is done.
+// It will backoff if the function returns a retryable error.
+// Once the context is done, retries will end and the most recent error will be returned.
+// Backoff is not reset by this function.
+func (b *Backoff) Retry(ctx context.Context, f func() error, retry ...codes.Code) error {
+ // If the context is already done, don't make any attempts to call f.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // Try calling f while the error is retryable and ctx is not done.
+ for {
+ if err := f(); !IsRetryable(err, retry...) {
+ return err
+ }
+ select {
+ case <-time.After(b.Duration()):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+// IsRetryable returns false unless the error is explicitly retriable per
+// https://godoc.org/google.golang.org/grpc/codes,
+// or if the error code is in retry. codes.OK is not retryable.
+func IsRetryable(err error, retry ...codes.Code) bool {
+ code := status.Code(err)
+ switch code {
+ // Fast path.
+ case codes.OK:
+ return false
+
+ // Debatable cases:
+ case codes.DeadlineExceeded,
+ codes.ResourceExhausted: // Retry with backoff.
+ return true
+
+ // Errors that are explicitly retryable:
+ case codes.Unavailable, // Client can just retry the call.
+ codes.Aborted: // Client can retry the read-modify-write function.
+ return true
+ }
+
+ for _, c := range retry {
+ if code == c {
+ return true
+ }
+ }
+
+ // Don't retry for all other errors, unless it is a RetriableError.
+ _, ok := err.(RetriableError)
+ return ok
+}
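Backoff.Retry is the retry primitive the rest of this vendored client builds on. A small usage sketch against the API above; the RPC being retried is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"github.com/google/trillian/client/backoff"
	"google.golang.org/grpc/codes"
)

// callSomeRPC stands in for the gRPC call being retried.
func callSomeRPC(ctx context.Context) error { return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	b := &backoff.Backoff{
		Min:    100 * time.Millisecond,
		Max:    5 * time.Second,
		Factor: 2,
		Jitter: true,
	}
	// Retry until the call stops returning a retryable code (FailedPrecondition
	// is added to the defaults here) or until the context deadline expires.
	if err := b.Retry(ctx, func() error { return callSomeRPC(ctx) }, codes.FailedPrecondition); err != nil {
		log.Fatal(err)
	}
}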
diff --git a/vendor/github.com/google/trillian/client/log_client.go b/vendor/github.com/google/trillian/client/log_client.go
new file mode 100644
index 0000000000..a13a916de4
--- /dev/null
+++ b/vendor/github.com/google/trillian/client/log_client.go
@@ -0,0 +1,343 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package client verifies responses from the Trillian log.
+package client
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/google/trillian"
+ "github.com/google/trillian/client/backoff"
+ "github.com/google/trillian/types"
+ "github.com/transparency-dev/merkle"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// LogClient represents a client for a given Trillian log instance.
+type LogClient struct {
+ *LogVerifier
+ LogID int64
+ MinMergeDelay time.Duration
+ client trillian.TrillianLogClient
+ root types.LogRootV1
+ rootLock sync.Mutex
+ updateLock sync.Mutex
+}
+
+// New returns a new LogClient.
+func New(logID int64, client trillian.TrillianLogClient, verifier *LogVerifier, root types.LogRootV1) *LogClient {
+ return &LogClient{
+ LogVerifier: verifier,
+ LogID: logID,
+ client: client,
+ root: root,
+ }
+}
+
+// NewFromTree creates a new LogClient given a tree config.
+func NewFromTree(client trillian.TrillianLogClient, config *trillian.Tree, root types.LogRootV1) (*LogClient, error) {
+ verifier, err := NewLogVerifierFromTree(config)
+ if err != nil {
+ return nil, err
+ }
+
+ return New(config.GetTreeId(), client, verifier, root), nil
+}
+
+// AddLeaf adds leaf to the append only log.
+// Blocks and continuously updates the trusted root until a successful inclusion proof
+// can be retrieved.
+func (c *LogClient) AddLeaf(ctx context.Context, data []byte) error {
+ if err := c.QueueLeaf(ctx, data); err != nil {
+ return fmt.Errorf("QueueLeaf(): %v", err)
+ }
+ if err := c.WaitForInclusion(ctx, data); err != nil {
+ return fmt.Errorf("WaitForInclusion(): %v", err)
+ }
+ return nil
+}
+
+// ListByIndex returns the requested leaves by index.
+func (c *LogClient) ListByIndex(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) {
+ resp, err := c.client.GetLeavesByRange(ctx,
+ &trillian.GetLeavesByRangeRequest{
+ LogId: c.LogID,
+ StartIndex: start,
+ Count: count,
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Verify that we got back the requested leaves.
+ if len(resp.Leaves) < int(count) {
+ return nil, fmt.Errorf("len(Leaves)=%d, want %d", len(resp.Leaves), count)
+ }
+ for i, l := range resp.Leaves {
+ if want := start + int64(i); l.LeafIndex != want {
+ return nil, fmt.Errorf("Leaves[%d].LeafIndex=%d, want %d", i, l.LeafIndex, want)
+ }
+ }
+
+ return resp.Leaves, nil
+}
+
+// WaitForRootUpdate repeatedly fetches the latest root until there is an
+// update, which it then applies, or until ctx times out.
+func (c *LogClient) WaitForRootUpdate(ctx context.Context) (*types.LogRootV1, error) {
+ b := &backoff.Backoff{
+ Min: 100 * time.Millisecond,
+ Max: 10 * time.Second,
+ Factor: 2,
+ Jitter: true,
+ }
+
+ for {
+ newTrusted, err := c.UpdateRoot(ctx)
+ switch status.Code(err) {
+ case codes.OK:
+ if newTrusted != nil {
+ return newTrusted, nil
+ }
+ case codes.Unavailable, codes.NotFound, codes.FailedPrecondition:
+ // Retry.
+ default:
+ return nil, err
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil, status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
+ case <-time.After(b.Duration()):
+ }
+ }
+}
+
+// getAndVerifyLatestRoot fetches and verifies the latest root against a trusted root, seen in the past.
+// Pass nil for trusted if this is the first time querying this log.
+func (c *LogClient) getAndVerifyLatestRoot(ctx context.Context, trusted *types.LogRootV1) (*types.LogRootV1, error) {
+ resp, err := c.client.GetLatestSignedLogRoot(ctx,
+ &trillian.GetLatestSignedLogRootRequest{
+ LogId: c.LogID,
+ FirstTreeSize: int64(trusted.TreeSize),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(gbelvin): Turn on root verification.
+ /*
+ logRoot, err := c.VerifyRoot(&types.LogRootV1{}, resp.GetSignedLogRoot(), nil)
+ if err != nil {
+ return nil, err
+ }
+ */
+ // TODO(gbelvin): Remove this hack when all implementations store digital signatures.
+ var logRoot types.LogRootV1
+ if err := logRoot.UnmarshalBinary(resp.GetSignedLogRoot().LogRoot); err != nil {
+ return nil, err
+ }
+
+ if trusted.TreeSize > 0 &&
+ logRoot.TreeSize == trusted.TreeSize &&
+ bytes.Equal(logRoot.RootHash, trusted.RootHash) {
+ // Tree has not been updated.
+ return &logRoot, nil
+ }
+
+ // Verify root update if the tree / the latest signed log root isn't empty.
+ if logRoot.TreeSize > 0 {
+ if _, err := c.VerifyRoot(trusted, resp.GetSignedLogRoot(), resp.GetProof().GetHashes()); err != nil {
+ return nil, err
+ }
+ }
+ return &logRoot, nil
+}
+
+// GetRoot returns a copy of the latest trusted root.
+func (c *LogClient) GetRoot() *types.LogRootV1 {
+ c.rootLock.Lock()
+ defer c.rootLock.Unlock()
+
+ // Copy the internal trusted root in order to prevent clients from modifying it.
+ ret := c.root
+ return &ret
+}
+
+// UpdateRoot retrieves the current SignedLogRoot, verifying it against roots this client has
+// seen in the past, and updating the currently trusted root if the new root verifies, and is
+// newer than the currently trusted root.
+func (c *LogClient) UpdateRoot(ctx context.Context) (*types.LogRootV1, error) {
+ // Only one root update should be running at any point in time, because
+ // the update involves a consistency proof from the old value, and if the
+ // old value could change along the way (in another goroutine) then the
+ // result could be inconsistent.
+ //
+ // For example, if the current root is A and two root updates A->B and A->C
+ // happen in parallel, then we might end up with the transitions A->B->C:
+ // cur := A cur := A
+ // getRoot() => B getRoot() => C
+ // proof(A->B) ok proof(A->C) ok
+ // c.root = B
+ // c.root = C
+ // and the last step (B->C) has no proof and so could hide a forked tree.
+ c.updateLock.Lock()
+ defer c.updateLock.Unlock()
+
+ currentlyTrusted := c.GetRoot()
+ newTrusted, err := c.getAndVerifyLatestRoot(ctx, currentlyTrusted)
+ if err != nil {
+ return nil, err
+ }
+
+ // Lock "rootLock" for the "root" update.
+ c.rootLock.Lock()
+ defer c.rootLock.Unlock()
+
+ if newTrusted.TimestampNanos > currentlyTrusted.TimestampNanos &&
+ newTrusted.TreeSize >= currentlyTrusted.TreeSize {
+
+ // Take a copy of the new trusted root in order to prevent clients from modifying it.
+ c.root = *newTrusted
+
+ return newTrusted, nil
+ }
+
+ return nil, nil
+}
+
+// WaitForInclusion blocks until the requested data has been verified with an
+// inclusion proof.
+//
+// It will continuously update the root to the latest one available until the
+// data is found, or an error is returned.
+//
+// It is best to call this method with a context that will timeout to avoid
+// waiting forever.
+func (c *LogClient) WaitForInclusion(ctx context.Context, data []byte) error {
+ leaf := prepareLeaf(c.hasher, data)
+
+ // If a minimum merge delay has been configured, wait at least that long before
+ // starting to poll
+ if c.MinMergeDelay > 0 {
+ select {
+ case <-ctx.Done():
+ return status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
+ case <-time.After(c.MinMergeDelay):
+ }
+ }
+
+ var root *types.LogRootV1
+ for {
+ root = c.GetRoot()
+
+ // It is illegal to ask for an inclusion proof with TreeSize = 0.
+ if root.TreeSize >= 1 {
+ ok, err := c.getAndVerifyInclusionProof(ctx, leaf.MerkleLeafHash, root)
+ if err != nil && status.Code(err) != codes.NotFound {
+ return err
+ } else if ok {
+ return nil
+ }
+ }
+
+ // If not found or tree is empty, wait for a root update before retrying again.
+ if _, err := c.WaitForRootUpdate(ctx); err != nil {
+ return err
+ }
+
+ // Retry
+ }
+}
+
+func (c *LogClient) getAndVerifyInclusionProof(ctx context.Context, leafHash []byte, sth *types.LogRootV1) (bool, error) {
+ resp, err := c.client.GetInclusionProofByHash(ctx,
+ &trillian.GetInclusionProofByHashRequest{
+ LogId: c.LogID,
+ LeafHash: leafHash,
+ TreeSize: int64(sth.TreeSize),
+ })
+ if err != nil {
+ return false, err
+ }
+ if len(resp.Proof) < 1 {
+ return false, nil
+ }
+ for _, proof := range resp.Proof {
+ if err := c.VerifyInclusionByHash(sth, leafHash, proof); err != nil {
+ return false, fmt.Errorf("VerifyInclusionByHash(): %v", err)
+ }
+ }
+ return true, nil
+}
+
+// AddSequencedLeaves adds any number of pre-sequenced leaves to the log.
+// Indexes must be contiguous.
+func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int64][]byte) error {
+ if len(dataByIndex) == 0 {
+ return nil
+ }
+ leaves := make([]*trillian.LogLeaf, 0, len(dataByIndex))
+ indexes := make([]int64, 0, len(dataByIndex))
+ for index := range dataByIndex {
+ indexes = append(indexes, index)
+ }
+ sort.Slice(indexes, func(a, b int) bool { return indexes[a] < indexes[b] })
+
+ for i, index := range indexes {
+ // Check index continuity.
+ if want := indexes[0] + int64(i); index != want {
+			return fmt.Errorf("missing index in contiguous index range. got: %v, want: %v", index, want)
+ }
+ leaf := prepareLeaf(c.hasher, dataByIndex[index])
+ leaf.LeafIndex = index
+ leaves = append(leaves, leaf)
+ }
+ resp, err := c.client.AddSequencedLeaves(ctx, &trillian.AddSequencedLeavesRequest{
+ LogId: c.LogID,
+ Leaves: leaves,
+ })
+ for _, leaf := range resp.GetResults() {
+ if s := status.FromProto(leaf.GetStatus()); s.Code() != codes.OK && s.Code() != codes.AlreadyExists {
+ return status.Errorf(s.Code(), "unexpected fail status in AddSequencedLeaves: %+v, err: %v", leaf, s.Message())
+ }
+ }
+ return err
+}
+
+// QueueLeaf adds a leaf to a Trillian log without blocking.
+// AlreadyExists is considered a success case by this function.
+func (c *LogClient) QueueLeaf(ctx context.Context, data []byte) error {
+ leaf := prepareLeaf(c.hasher, data)
+ _, err := c.client.QueueLeaf(ctx, &trillian.QueueLeafRequest{
+ LogId: c.LogID,
+ Leaf: leaf,
+ })
+ return err
+}
+
+// prepareLeaf returns a trillian.LogLeaf prepopulated with leaf data and hash.
+func prepareLeaf(hasher merkle.LogHasher, data []byte) *trillian.LogLeaf {
+ leafHash := hasher.HashLeaf(data)
+ return &trillian.LogLeaf{
+ LeafValue: data,
+ MerkleLeafHash: leafHash,
+ }
+}
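Putting the client above together: a sketch of creating a LogClient from a tree config and appending a leaf, using only functions defined in this file. The dial address and tree ID are placeholders:

package main

import (
	"context"
	"log"

	"github.com/google/trillian"
	"github.com/google/trillian/client"
	"github.com/google/trillian/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()

	conn, err := grpc.Dial("localhost:8090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Placeholder tree; in practice it comes from the Trillian admin API.
	tree := &trillian.Tree{TreeId: 12345, TreeType: trillian.TreeType_LOG}

	lc, err := client.NewFromTree(trillian.NewTrillianLogClient(conn), tree, types.LogRootV1{})
	if err != nil {
		log.Fatal(err)
	}

	// AddLeaf queues the entry and blocks until an inclusion proof verifies.
	if err := lc.AddLeaf(ctx, []byte("hello transparency log")); err != nil {
		log.Fatal(err)
	}
}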
diff --git a/vendor/github.com/google/trillian/client/log_verifier.go b/vendor/github.com/google/trillian/client/log_verifier.go
new file mode 100644
index 0000000000..3e8ecfff11
--- /dev/null
+++ b/vendor/github.com/google/trillian/client/log_verifier.go
@@ -0,0 +1,91 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/trillian"
+ "github.com/google/trillian/types"
+ "github.com/transparency-dev/merkle"
+ "github.com/transparency-dev/merkle/proof"
+ "github.com/transparency-dev/merkle/rfc6962"
+)
+
+// LogVerifier allows verification of output from Trillian Logs, both regular
+// and pre-ordered; it is safe for concurrent use (as its contents are fixed
+// after construction).
+type LogVerifier struct {
+ // hasher is the hash strategy used to compute nodes in the Merkle tree.
+ hasher merkle.LogHasher
+}
+
+// NewLogVerifier returns an object that can verify output from Trillian Logs.
+func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier {
+ return &LogVerifier{hasher: hasher}
+}
+
+// NewLogVerifierFromTree creates a new LogVerifier using the algorithms
+// specified by a Trillian Tree object.
+func NewLogVerifierFromTree(config *trillian.Tree) (*LogVerifier, error) {
+ if config == nil {
+ return nil, errors.New("client: NewLogVerifierFromTree(): nil config")
+ }
+ log, pLog := trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG
+ if got := config.TreeType; got != log && got != pLog {
+ return nil, fmt.Errorf("client: NewLogVerifierFromTree(): TreeType: %v, want %v or %v", got, log, pLog)
+ }
+
+ return NewLogVerifier(rfc6962.DefaultHasher), nil
+}
+
+// VerifyRoot verifies that newRoot is a valid append-only operation from
+// trusted. If trusted.TreeSize is zero, a consistency proof is not needed.
+func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.SignedLogRoot, consistency [][]byte) (*types.LogRootV1, error) {
+ if trusted == nil {
+ return nil, fmt.Errorf("VerifyRoot() error: trusted == nil")
+ }
+ if newRoot == nil {
+ return nil, fmt.Errorf("VerifyRoot() error: newRoot == nil")
+ }
+
+ var r types.LogRootV1
+ if err := r.UnmarshalBinary(newRoot.LogRoot); err != nil {
+ return nil, err
+ }
+
+ // Implicitly trust the first root we get.
+ if trusted.TreeSize != 0 {
+ // Verify consistency proof.
+ if err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil {
+ return nil, fmt.Errorf("failed to verify consistency proof from %d->%d %x->%x: %v", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err)
+ }
+ }
+ return &r, nil
+}
+
+// VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash
+// matches the given trusted root.
+func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, pf *trillian.Proof) error {
+ if trusted == nil {
+ return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil")
+ }
+ if pf == nil {
+ return fmt.Errorf("VerifyInclusionByHash() error: proof == nil")
+ }
+
+ return proof.VerifyInclusion(c.hasher, uint64(pf.LeafIndex), trusted.TreeSize, leafHash, pf.Hashes, trusted.RootHash)
+}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 55c8ca4475..efab55e655 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,15 @@ This package provides various compression algorithms.
# changelog
+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
+ * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
+ * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
+ * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+ * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+ * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+ * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+  * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 512ffe5b95..55a388553d 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}
func (r *readerWrapper) readByte() (byte, error) {
- n2, err := r.r.Read(r.tmp[:1])
+ n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
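The readByte change above swaps a bare Read for io.ReadFull: an io.Reader may legally return (0, nil), so a single Read into a 1-byte buffer can yield no data without an error. The same pattern in isolation, as a minimal sketch:

package sketch

import "io"

// readOneByte reads exactly one byte from r. io.ReadFull keeps calling Read
// until the buffer is filled or an error occurs, so a short (0, nil) read
// from the underlying reader cannot slip through the way a bare r.Read could.
func readOneByte(r io.Reader) (byte, error) {
	var tmp [1]byte
	if _, err := io.ReadFull(r, tmp[:]); err != nil {
		return 0, err
	}
	return tmp[0], nil
}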
diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile
index 2e482fa17b..d088c044e6 100644
--- a/vendor/github.com/leodido/go-urn/makefile
+++ b/vendor/github.com/leodido/go-urn/makefile
@@ -17,7 +17,7 @@ images: docs/urn.png
.PHONY: removecomments
removecomments:
- @go build ./tools/removecomments
+ @cd ./tools/removecomments; go build -o ../../removecomments ./main.go
machine.go: machine.go.rl
diff --git a/vendor/github.com/opencontainers/image-spec/schema/config-schema.json b/vendor/github.com/opencontainers/image-spec/schema/config-schema.json
index 5bd411f136..0c325f1e08 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/config-schema.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/config-schema.json
@@ -1,6 +1,6 @@
{
"description": "OpenContainer Config Specification",
- "$schema": "https://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/config",
"type": "object",
"properties": {
diff --git a/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json b/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json
index b64ca1313d..b2f6deeaf4 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/content-descriptor.json
@@ -1,6 +1,6 @@
{
"description": "OpenContainer Content Descriptor Specification",
- "$schema": "https://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/descriptor",
"type": "object",
"properties": {
@@ -20,6 +20,14 @@
"description": "a list of urls from which this object may be downloaded",
"$ref": "defs-descriptor.json#/definitions/urls"
},
+ "data": {
+ "description": "an embedding of the targeted content (base64 encoded)",
+ "$ref": "defs.json#/definitions/base64"
+ },
+ "artifactType": {
+ "description": "the IANA media type of this artifact",
+ "$ref": "defs-descriptor.json#/definitions/mediaType"
+ },
"annotations": {
"id": "https://opencontainers.org/schema/descriptor/annotations",
"$ref": "defs-descriptor.json#/definitions/annotations"
diff --git a/vendor/github.com/opencontainers/image-spec/schema/defs.json b/vendor/github.com/opencontainers/image-spec/schema/defs.json
index 03cb495b86..220f5d47db 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/defs.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/defs.json
@@ -61,6 +61,12 @@
}
]
},
+ "base64": {
+ "type": "string",
+ "media": {
+ "binaryEncoding": "base64"
+ }
+ },
"stringPointer": {
"oneOf": [
{
diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json
index e99131bf01..3d56767d4c 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/image-index-schema.json
@@ -1,6 +1,6 @@
{
"description": "OpenContainer Image Index Specification",
- "$schema": "https://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/index",
"type": "object",
"properties": {
diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json
index 714ed82a25..874d2174c7 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/image-layout-schema.json
@@ -1,6 +1,6 @@
{
"description": "OpenContainer Image Layout Schema",
- "$schema": "https://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/layout",
"type": "object",
"properties": {
diff --git a/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json b/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json
index 8286376eb9..9bce5a1502 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json
+++ b/vendor/github.com/opencontainers/image-spec/schema/image-manifest-schema.json
@@ -1,6 +1,6 @@
{
"description": "OpenContainer Image Manifest Specification",
- "$schema": "https://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
"id": "https://opencontainers.org/schema/image/manifest",
"type": "object",
"properties": {
@@ -15,9 +15,16 @@
"description": "the mediatype of the referenced object",
"$ref": "defs-descriptor.json#/definitions/mediaType"
},
+ "artifactType": {
+ "description": "the artifact mediatype of the referenced object",
+ "$ref": "defs-descriptor.json#/definitions/mediaType"
+ },
"config": {
"$ref": "content-descriptor.json"
},
+ "subject": {
+ "$ref": "content-descriptor.json"
+ },
"layers": {
"type": "array",
"minItems": 1,
diff --git a/vendor/github.com/opencontainers/image-spec/schema/validator.go b/vendor/github.com/opencontainers/image-spec/schema/validator.go
index e219d38c25..41ca58b504 100644
--- a/vendor/github.com/opencontainers/image-spec/schema/validator.go
+++ b/vendor/github.com/opencontainers/image-spec/schema/validator.go
@@ -92,7 +92,7 @@ func (v Validator) Validate(src io.Reader) error {
type unimplemented string
-func (v unimplemented) Validate(src io.Reader) error {
+func (v unimplemented) Validate(_ io.Reader) error {
return fmt.Errorf("%s: unimplemented", v)
}
@@ -117,9 +117,9 @@ func validateManifest(r io.Reader) error {
if layer.MediaType != string(v1.MediaTypeImageLayer) &&
layer.MediaType != string(v1.MediaTypeImageLayerGzip) &&
layer.MediaType != string(v1.MediaTypeImageLayerZstd) &&
- layer.MediaType != string(v1.MediaTypeImageLayerNonDistributable) &&
- layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableGzip) &&
- layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableZstd) {
+ layer.MediaType != string(v1.MediaTypeImageLayerNonDistributable) && //nolint:staticcheck
+ layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableGzip) && //nolint:staticcheck
+ layer.MediaType != string(v1.MediaTypeImageLayerNonDistributableZstd) { //nolint:staticcheck
fmt.Printf("warning: layer %s has an unknown media type: %s\n", layer.Digest, layer.MediaType)
}
}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
index 6f9e6fd3ab..e628920460 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
@@ -65,7 +65,4 @@ const (
// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
AnnotationArtifactDescription = "org.opencontainers.artifact.description"
-
- // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
- AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied"
)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
deleted file mode 100644
index 03d76ce437..0000000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2022 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-// Artifact describes an artifact manifest.
-// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON.
-type Artifact struct {
- // MediaType is the media type of the object this schema refers to.
- MediaType string `json:"mediaType"`
-
- // ArtifactType is the IANA media type of the artifact this schema refers to.
- ArtifactType string `json:"artifactType"`
-
- // Blobs is a collection of blobs referenced by this manifest.
- Blobs []Descriptor `json:"blobs,omitempty"`
-
- // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest.
- Subject *Descriptor `json:"subject,omitempty"`
-
- // Annotations contains arbitrary metadata for the artifact manifest.
- Annotations map[string]string `json:"annotations,omitempty"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
index e6aa113f07..36b0aeb8f1 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -49,13 +49,15 @@ type ImageConfig struct {
// StopSignal contains the system call signal that will be sent to the container to exit.
StopSignal string `json:"StopSignal,omitempty"`
- // ArgsEscaped `[Deprecated]` - This field is present only for legacy
- // compatibility with Docker and should not be used by new image builders.
- // It is used by Docker for Windows images to indicate that the `Entrypoint`
- // or `Cmd` or both, contains only a single element array, that is a
- // pre-escaped, and combined into a single string `CommandLine`. If `true`
- // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double
- // escaping.
+ // ArgsEscaped
+ //
+ // Deprecated: This field is present only for legacy compatibility with
+ // Docker and should not be used by new image builders. It is used by Docker
+ // for Windows images to indicate that the `Entrypoint` or `Cmd` or both,
+ // contains only a single element array, that is a pre-escaped, and combined
+ // into a single string `CommandLine`. If `true` the value in `Entrypoint` or
+ // `Cmd` should be used as-is to avoid double escaping.
+ // https://github.com/opencontainers/image-spec/pull/892
ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
}
@@ -95,22 +97,8 @@ type Image struct {
// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
Author string `json:"author,omitempty"`
- // Architecture is the CPU architecture which the binaries in this image are built to run on.
- Architecture string `json:"architecture"`
-
- // Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
- Variant string `json:"variant,omitempty"`
-
- // OS is the name of the operating system which the image is built to run on.
- OS string `json:"os"`
-
- // OSVersion is an optional field specifying the operating system
- // version, for example on Windows `10.0.14393.1066`.
- OSVersion string `json:"os.version,omitempty"`
-
- // OSFeatures is an optional field specifying an array of strings,
- // each listing a required OS feature (for example on Windows `win32k`).
- OSFeatures []string `json:"os.features,omitempty"`
+ // Platform describes the platform which the image in the manifest runs on.
+ Platform
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
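Image now embeds Platform instead of declaring the architecture/OS fields inline, so the promoted fields keep their JSON keys and existing accessors keep compiling. A small sketch of what that looks like for a caller:

package main

import (
	"fmt"

	"github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	img := v1.Image{
		// Platform is embedded, so Architecture and OS are promoted fields.
		Platform: v1.Platform{Architecture: "amd64", OS: "linux"},
	}
	// Code that previously read img.Architecture directly is unaffected.
	fmt.Println(img.Architecture, img.OS)
}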
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
index 730a09359b..4ce7b54ccd 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
@@ -23,6 +23,9 @@ type Manifest struct {
// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
MediaType string `json:"mediaType,omitempty"`
+ // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact.
+ ArtifactType string `json:"artifactType,omitempty"`
+
// Config references a configuration object for a container, by digest.
// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
Config Descriptor `json:"config"`
@@ -36,3 +39,11 @@ type Manifest struct {
// Annotations contains arbitrary metadata for the image manifest.
Annotations map[string]string `json:"annotations,omitempty"`
}
+
+// ScratchDescriptor is the descriptor of a blob with content of `{}`.
+var ScratchDescriptor = Descriptor{
+ MediaType: MediaTypeScratch,
+ Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`,
+ Size: 2,
+ Data: []byte(`{}`),
+}
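ArtifactType plus the new ScratchDescriptor are the rc3 way to express an artifact as an ordinary image manifest whose config is the empty {} blob. A hedged sketch of building such a manifest; the artifact media type, digest, and size of the layer are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := v1.Manifest{
		MediaType:    v1.MediaTypeImageManifest,
		ArtifactType: "application/vnd.example.sbom.v1+json", // illustrative artifact type
		Config:       v1.ScratchDescriptor,                   // the {} blob defined above
		Layers: []v1.Descriptor{{
			MediaType: "application/vnd.example.sbom.v1+json",
			Digest:    digest.Digest("sha256:..."), // placeholder digest
			Size:      1234,
		}},
	}
	out, _ := json.MarshalIndent(m, "", "  ")
	fmt.Println(string(out))
}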
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
index 935b481e3e..5dd31255eb 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
@@ -40,21 +40,36 @@ const (
// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
// the manifest but with distribution restrictions.
+ //
+ // Deprecated: Non-distributable layers are deprecated, and not recommended
+ // for future use. Implementations SHOULD NOT produce new non-distributable
+ // layers.
+ // https://github.com/opencontainers/image-spec/pull/965
MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
// MediaTypeImageLayerNonDistributableGzip is the media type for
// gzipped layers referenced by the manifest but with distribution
// restrictions.
+ //
+ // Deprecated: Non-distributable layers are deprecated, and not recommended
+ // for future use. Implementations SHOULD NOT produce new non-distributable
+ // layers.
+ // https://github.com/opencontainers/image-spec/pull/965
MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
// compressed layers referenced by the manifest but with distribution
// restrictions.
+ //
+ // Deprecated: Non-distributable layers are deprecated, and not recommended
+ // for future use. Implementations SHOULD NOT produce new non-distributable
+ // layers.
+ // https://github.com/opencontainers/image-spec/pull/965
MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
// MediaTypeImageConfig specifies the media type for the image configuration.
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
- // MediaTypeArtifactManifest specifies the media type for a content descriptor.
- MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json"
+ // MediaTypeScratch specifies the media type for an unused blob containing the value `{}`
+ MediaTypeScratch = "application/vnd.oci.scratch.v1+json"
)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
index 1afd590fe0..3d4119b441 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -25,7 +25,7 @@ const (
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
- VersionDev = "-dev"
+ VersionDev = "-rc.3"
)
// Version is the specification version that the package types support.
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
index 3ffbcc91fe..53d4d66265 100644
--- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
+++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
@@ -344,59 +344,59 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) {
out.GithubWorkflowRef = string(e.Value)
// END: Deprecated
case e.Id.Equal(OIDIssuerV2):
- if err := parseDERString(e.Value, &out.Issuer); err != nil {
+ if err := ParseDERString(e.Value, &out.Issuer); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildSignerURI):
- if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil {
+ if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildSignerDigest):
- if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil {
+ if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDRunnerEnvironment):
- if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil {
+ if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryURI):
- if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryDigest):
- if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryRef):
- if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryIdentifier):
- if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryOwnerURI):
- if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier):
- if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil {
+ if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildConfigURI):
- if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil {
+ if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildConfigDigest):
- if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil {
+ if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDBuildTrigger):
- if err := parseDERString(e.Value, &out.BuildTrigger); err != nil {
+ if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil {
return Extensions{}, err
}
case e.Id.Equal(OIDRunInvocationURI):
- if err := parseDERString(e.Value, &out.RunInvocationURI); err != nil {
+ if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil {
return Extensions{}, err
}
}
@@ -407,9 +407,9 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) {
return out, nil
}
-// parseDERString decodes a DER-encoded string and puts the value in parsedVal.
-// Rerturns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
-func parseDERString(val []byte, parsedVal *string) error {
+// ParseDERString decodes a DER-encoded string and puts the value in parsedVal.
+// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
+func ParseDERString(val []byte, parsedVal *string) error {
rest, err := asn1.Unmarshal(val, parsedVal)
if err != nil {
return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err)
diff --git a/vendor/github.com/sigstore/rekor/pkg/log/log.go b/vendor/github.com/sigstore/rekor/pkg/log/log.go
new file mode 100644
index 0000000000..413b604f78
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/log/log.go
@@ -0,0 +1,115 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "context"
+ "log"
+
+ "github.com/go-chi/chi/middleware"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// Logger is the default logger, set to development mode
+var Logger *zap.SugaredLogger
+
+func init() {
+ ConfigureLogger("dev")
+}
+
+func ConfigureLogger(logType string) {
+ var cfg zap.Config
+ if logType == "prod" {
+ cfg = zap.NewProductionConfig()
+ cfg.EncoderConfig.LevelKey = "severity"
+ cfg.EncoderConfig.MessageKey = "message"
+ cfg.EncoderConfig.TimeKey = "time"
+ cfg.EncoderConfig.EncodeLevel = encodeLevel()
+ cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
+ cfg.EncoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder
+ cfg.EncoderConfig.EncodeCaller = zapcore.FullCallerEncoder
+ } else {
+ cfg = zap.NewDevelopmentConfig()
+ cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+ }
+ logger, err := cfg.Build()
+ if err != nil {
+ log.Fatalln("createLogger", err)
+ }
+ Logger = logger.Sugar()
+}
+
+func encodeLevel() zapcore.LevelEncoder {
+ return func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
+ switch l {
+ case zapcore.DebugLevel:
+ enc.AppendString("DEBUG")
+ case zapcore.InfoLevel:
+ enc.AppendString("INFO")
+ case zapcore.WarnLevel:
+ enc.AppendString("WARNING")
+ case zapcore.ErrorLevel:
+ enc.AppendString("ERROR")
+ case zapcore.DPanicLevel:
+ enc.AppendString("CRITICAL")
+ case zapcore.PanicLevel:
+ enc.AppendString("ALERT")
+ case zapcore.FatalLevel:
+ enc.AppendString("EMERGENCY")
+ }
+ }
+}
+
+var CliLogger = createCliLogger()
+
+func createCliLogger() *zap.SugaredLogger {
+ cfg := zap.NewDevelopmentConfig()
+ cfg.EncoderConfig.TimeKey = ""
+ cfg.EncoderConfig.LevelKey = ""
+ cfg.DisableCaller = true
+ cfg.DisableStacktrace = true
+ logger, err := cfg.Build()
+ if err != nil {
+ log.Fatalln("createLogger", err)
+ }
+
+ return logger.Sugar()
+}
+
+func WithRequestID(ctx context.Context, id string) context.Context {
+ return context.WithValue(ctx, middleware.RequestIDKey, id)
+}
+
+type operation struct {
+ id string
+}
+
+func (o operation) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddString("id", o.id)
+ return nil
+}
+
+func ContextLogger(ctx context.Context) *zap.SugaredLogger {
+ proposedLogger := Logger
+ if ctx != nil {
+ if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok {
+ requestID := operation{ctxRequestID}
+ proposedLogger = proposedLogger.With(zap.Object("operation", requestID))
+ }
+ }
+ return proposedLogger
+}
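A short sketch of how this vendored logging helper is used: stash a request ID in the context, then pull a request-scoped logger back out. The ID shown is a placeholder:

package main

import (
	"context"

	"github.com/sigstore/rekor/pkg/log"
)

func main() {
	// Attach a (placeholder) request ID to the context...
	ctx := log.WithRequestID(context.Background(), "req-1234")

	// ...and the context-aware logger tags every line with it as an
	// "operation" field, falling back to the package-level Logger otherwise.
	log.ContextLogger(ctx).Info("processing entry")
}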
diff --git a/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go b/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go
new file mode 100644
index 0000000000..e1d0d5d6da
--- /dev/null
+++ b/vendor/github.com/sigstore/rekor/pkg/util/trillian_client.go
@@ -0,0 +1,380 @@
+//
+// Copyright 2021 The Sigstore Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "github.com/sigstore/rekor/pkg/log"
+ "github.com/transparency-dev/merkle/proof"
+ "github.com/transparency-dev/merkle/rfc6962"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ "github.com/google/trillian"
+ "github.com/google/trillian/client"
+ "github.com/google/trillian/types"
+)
+
+// TrillianClient provides a wrapper around the Trillian client
+type TrillianClient struct {
+ client trillian.TrillianLogClient
+ logID int64
+ context context.Context
+}
+
+// NewTrillianClient creates a TrillianClient with the given Trillian client and log/tree ID.
+func NewTrillianClient(ctx context.Context, logClient trillian.TrillianLogClient, logID int64) TrillianClient {
+ return TrillianClient{
+ client: logClient,
+ logID: logID,
+ context: ctx,
+ }
+}
+
+// Response includes a status code, an optional error message, and one of the results based on the API call
+type Response struct {
+ // Status is the status code of the response
+ Status codes.Code
+ // Error contains an error on request or client failure
+ Err error
+ // GetAddResult contains the response from queueing a leaf in Trillian
+ GetAddResult *trillian.QueueLeafResponse
+ // GetLeafAndProofResult contains the response for fetching an inclusion proof and leaf
+ GetLeafAndProofResult *trillian.GetEntryAndProofResponse
+ // GetLatestResult contains the response for the latest checkpoint
+ GetLatestResult *trillian.GetLatestSignedLogRootResponse
+ // GetConsistencyProofResult contains the response for a consistency proof between two log sizes
+ GetConsistencyProofResult *trillian.GetConsistencyProofResponse
+ // getProofResult contains the response for an inclusion proof fetched by leaf hash
+ getProofResult *trillian.GetInclusionProofByHashResponse
+}
+
+func unmarshalLogRoot(logRoot []byte) (types.LogRootV1, error) {
+ var root types.LogRootV1
+ if err := root.UnmarshalBinary(logRoot); err != nil {
+ return types.LogRootV1{}, err
+ }
+ return root, nil
+}
+
+func (t *TrillianClient) root() (types.LogRootV1, error) {
+ rqst := &trillian.GetLatestSignedLogRootRequest{
+ LogId: t.logID,
+ }
+ resp, err := t.client.GetLatestSignedLogRoot(t.context, rqst)
+ if err != nil {
+ return types.LogRootV1{}, err
+ }
+ return unmarshalLogRoot(resp.SignedLogRoot.LogRoot)
+}
+
+func (t *TrillianClient) AddLeaf(byteValue []byte) *Response {
+ leaf := &trillian.LogLeaf{
+ LeafValue: byteValue,
+ }
+ rqst := &trillian.QueueLeafRequest{
+ LogId: t.logID,
+ Leaf: leaf,
+ }
+ resp, err := t.client.QueueLeaf(t.context, rqst)
+
+ // check for error
+ if err != nil || (resp.QueuedLeaf.Status != nil && resp.QueuedLeaf.Status.Code != int32(codes.OK)) {
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetAddResult: resp,
+ }
+ }
+
+ root, err := t.root()
+ if err != nil {
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetAddResult: resp,
+ }
+ }
+ v := client.NewLogVerifier(rfc6962.DefaultHasher)
+ logClient := client.New(t.logID, t.client, v, root)
+
+ waitForInclusion := func(ctx context.Context, leafHash []byte) *Response {
+ if logClient.MinMergeDelay > 0 {
+ select {
+ case <-ctx.Done():
+ return &Response{
+ Status: codes.DeadlineExceeded,
+ Err: ctx.Err(),
+ }
+ case <-time.After(logClient.MinMergeDelay):
+ }
+ }
+ for {
+ root = *logClient.GetRoot()
+ if root.TreeSize >= 1 {
+ proofResp := t.getProofByHash(resp.QueuedLeaf.Leaf.MerkleLeafHash)
+ // if this call succeeds or returns an error other than "not found", return
+ if proofResp.Err == nil || (proofResp.Err != nil && status.Code(proofResp.Err) != codes.NotFound) {
+ return proofResp
+ }
+ // otherwise wait for a root update before trying again
+ }
+
+ if _, err := logClient.WaitForRootUpdate(ctx); err != nil {
+ return &Response{
+ Status: codes.Unknown,
+ Err: err,
+ }
+ }
+ }
+ }
+
+ proofResp := waitForInclusion(t.context, resp.QueuedLeaf.Leaf.MerkleLeafHash)
+ if proofResp.Err != nil {
+ return &Response{
+ Status: status.Code(proofResp.Err),
+ Err: proofResp.Err,
+ GetAddResult: resp,
+ }
+ }
+
+ proofs := proofResp.getProofResult.Proof
+ if len(proofs) != 1 {
+ err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(resp.QueuedLeaf.Leaf.MerkleLeafHash), len(proofs))
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetAddResult: resp,
+ }
+ }
+
+ leafIndex := proofs[0].LeafIndex
+ leafResp := t.GetLeafAndProofByIndex(leafIndex)
+ if leafResp.Err != nil {
+ return &Response{
+ Status: status.Code(leafResp.Err),
+ Err: leafResp.Err,
+ GetAddResult: resp,
+ }
+ }
+
+ // overwrite queued leaf that doesn't have index set
+ resp.QueuedLeaf.Leaf = leafResp.GetLeafAndProofResult.Leaf
+
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetAddResult: resp,
+ // include getLeafAndProofResult for inclusion proof
+ GetLeafAndProofResult: leafResp.GetLeafAndProofResult,
+ }
+}
+
+func (t *TrillianClient) GetLeafAndProofByHash(hash []byte) *Response {
+ // get inclusion proof for hash, extract index, then fetch leaf using index
+ proofResp := t.getProofByHash(hash)
+ if proofResp.Err != nil {
+ return &Response{
+ Status: status.Code(proofResp.Err),
+ Err: proofResp.Err,
+ }
+ }
+
+ proofs := proofResp.getProofResult.Proof
+ if len(proofs) != 1 {
+ err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(hash), len(proofs))
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ }
+ }
+
+ return t.GetLeafAndProofByIndex(proofs[0].LeafIndex)
+}
+
+func (t *TrillianClient) GetLeafAndProofByIndex(index int64) *Response {
+ ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
+ defer cancel()
+
+ rootResp := t.GetLatest(0)
+ if rootResp.Err != nil {
+ return &Response{
+ Status: status.Code(rootResp.Err),
+ Err: rootResp.Err,
+ }
+ }
+
+ root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
+ if err != nil {
+ return &Response{
+ Status: status.Code(rootResp.Err),
+ Err: rootResp.Err,
+ }
+ }
+
+ resp, err := t.client.GetEntryAndProof(ctx,
+ &trillian.GetEntryAndProofRequest{
+ LogId: t.logID,
+ LeafIndex: index,
+ TreeSize: int64(root.TreeSize),
+ })
+
+ if resp != nil && resp.Proof != nil {
+ if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(index), root.TreeSize, resp.GetLeaf().MerkleLeafHash, resp.Proof.Hashes, root.RootHash); err != nil {
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ }
+ }
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetLeafAndProofResult: &trillian.GetEntryAndProofResponse{
+ Proof: resp.Proof,
+ Leaf: resp.Leaf,
+ SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
+ },
+ }
+ }
+
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ }
+}
+
+func (t *TrillianClient) GetLatest(leafSizeInt int64) *Response {
+
+ ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
+ defer cancel()
+
+ resp, err := t.client.GetLatestSignedLogRoot(ctx,
+ &trillian.GetLatestSignedLogRootRequest{
+ LogId: t.logID,
+ FirstTreeSize: leafSizeInt,
+ })
+
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetLatestResult: resp,
+ }
+}
+
+func (t *TrillianClient) GetConsistencyProof(firstSize, lastSize int64) *Response {
+
+ ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
+ defer cancel()
+
+ resp, err := t.client.GetConsistencyProof(ctx,
+ &trillian.GetConsistencyProofRequest{
+ LogId: t.logID,
+ FirstTreeSize: firstSize,
+ SecondTreeSize: lastSize,
+ })
+
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ GetConsistencyProofResult: resp,
+ }
+}
+
+func (t *TrillianClient) getProofByHash(hashValue []byte) *Response {
+ ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
+ defer cancel()
+
+ rootResp := t.GetLatest(0)
+ if rootResp.Err != nil {
+ return &Response{
+ Status: status.Code(rootResp.Err),
+ Err: rootResp.Err,
+ }
+ }
+ root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
+ if err != nil {
+ return &Response{
+ Status: status.Code(rootResp.Err),
+ Err: rootResp.Err,
+ }
+ }
+
+ // issue 1308: if the tree is empty, there's no way we can return a proof
+ if root.TreeSize == 0 {
+ return &Response{
+ Status: codes.NotFound,
+ Err: status.Error(codes.NotFound, "tree is empty"),
+ }
+ }
+
+ resp, err := t.client.GetInclusionProofByHash(ctx,
+ &trillian.GetInclusionProofByHashRequest{
+ LogId: t.logID,
+ LeafHash: hashValue,
+ TreeSize: int64(root.TreeSize),
+ })
+
+ if resp != nil {
+ v := client.NewLogVerifier(rfc6962.DefaultHasher)
+ for _, proof := range resp.Proof {
+ if err := v.VerifyInclusionByHash(&root, hashValue, proof); err != nil {
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ }
+ }
+ }
+ // Return an inclusion proof response with the requested
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ getProofResult: &trillian.GetInclusionProofByHashResponse{
+ Proof: resp.Proof,
+ SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
+ },
+ }
+ }
+
+ return &Response{
+ Status: status.Code(err),
+ Err: err,
+ }
+}
+
+func CreateAndInitTree(ctx context.Context, adminClient trillian.TrillianAdminClient, logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
+ t, err := adminClient.CreateTree(ctx, &trillian.CreateTreeRequest{
+ Tree: &trillian.Tree{
+ TreeType: trillian.TreeType_LOG,
+ TreeState: trillian.TreeState_ACTIVE,
+ MaxRootDuration: durationpb.New(time.Hour),
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("create tree: %w", err)
+ }
+
+ if err := client.InitLog(ctx, t, logClient); err != nil {
+ return nil, fmt.Errorf("init log: %w", err)
+ }
+ log.Logger.Infof("Created new tree with ID: %v", t.TreeId)
+ return t, nil
+}
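
A minimal sketch of how this wrapper might be driven by a caller; the gRPC endpoint and tree ID below are placeholders for illustration, not values taken from this change:

package main

import (
	"context"
	"log"

	"github.com/google/trillian"
	"github.com/sigstore/rekor/pkg/util"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint and tree ID; real values come from deployment config.
	conn, err := grpc.Dial("localhost:8090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	tc := util.NewTrillianClient(context.Background(), trillian.NewTrillianLogClient(conn), 1234)

	// AddLeaf queues the entry and blocks until an inclusion proof is available.
	resp := tc.AddLeaf([]byte("example entry"))
	if resp.Err != nil {
		log.Fatalf("add leaf: %v (status %v)", resp.Err, resp.Status)
	}
	log.Printf("integrated at leaf index %d", resp.GetLeafAndProofResult.Leaf.LeafIndex)
}
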
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
index d2b94d4d93..a8b2805e64 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -179,6 +179,6 @@ func validateEcdsaKey(pub *ecdsa.PublicKey) error {
}
// No validations currently, ED25519 supports only one key size.
-func validateEd25519Key(pub ed25519.PublicKey) error {
+func validateEd25519Key(_ ed25519.PublicKey) error {
return nil
}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
index 04e4f3b5fb..c7f1ccb917 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go
@@ -25,25 +25,25 @@ import (
type NoOpOptionImpl struct{}
// ApplyContext is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyContext(ctx *context.Context) {}
+func (NoOpOptionImpl) ApplyContext(_ *context.Context) {}
// ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {}
+func (NoOpOptionImpl) ApplyCryptoSignerOpts(_ *crypto.SignerOpts) {}
// ApplyDigest is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyDigest(digest *[]byte) {}
+func (NoOpOptionImpl) ApplyDigest(_ *[]byte) {}
// ApplyRand is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyRand(rand *io.Reader) {}
+func (NoOpOptionImpl) ApplyRand(_ *io.Reader) {}
// ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyRemoteVerification(remoteVerification *bool) {}
+func (NoOpOptionImpl) ApplyRemoteVerification(_ *bool) {}
// ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyRPCAuthOpts(opts *RPCAuth) {}
+func (NoOpOptionImpl) ApplyRPCAuthOpts(_ *RPCAuth) {}
// ApplyKeyVersion is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyKeyVersion(keyVersion *string) {}
+func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {}
// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
-func (NoOpOptionImpl) ApplyKeyVersionUsed(keyVersion **string) {}
+func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {}
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
index 2c15eeab74..0e1f9103df 100644
--- a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -622,60 +622,52 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
return fmt.Errorf("%w", errNotPartition)
}
- fs, pt, arch, err := descr.getPartitionMetadata()
- if err != nil {
+ var p partition
+ if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil {
return fmt.Errorf("%w", err)
}
// if already primary system partition, nothing to do
- if pt == PartPrimSys {
+ if p.Parttype == PartPrimSys {
return nil
}
- if pt != PartSystem {
+ if p.Parttype != PartSystem {
return fmt.Errorf("%w", errNotSystem)
}
- olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys))
- if err != nil && !errors.Is(err, ErrObjectNotFound) {
- return fmt.Errorf("%w", err)
- }
- extra := partition{
- Fstype: fs,
- Parttype: PartPrimSys,
- }
- copy(extra.Arch[:], arch)
-
- if err := descr.setExtra(extra); err != nil {
- return fmt.Errorf("%w", err)
- }
-
- descr.ModifiedAt = so.t.Unix()
-
- if olddescr != nil {
- oldfs, _, oldarch, err := olddescr.getPartitionMetadata()
- if err != nil {
+ // If there is currently a primary system partition, update it.
+ if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil {
+ var p partition
+ if err := d.getExtra(binaryUnmarshaler{&p}); err != nil {
return fmt.Errorf("%w", err)
}
- oldextra := partition{
- Fstype: oldfs,
- Parttype: PartSystem,
- Arch: getSIFArch(oldarch),
- }
+ p.Parttype = PartSystem
- if err := olddescr.setExtra(oldextra); err != nil {
+ if err := d.setExtra(p); err != nil {
return fmt.Errorf("%w", err)
}
- olddescr.ModifiedAt = so.t.Unix()
+ d.ModifiedAt = so.t.Unix()
+ } else if !errors.Is(err, ErrObjectNotFound) {
+ return fmt.Errorf("%w", err)
}
+ // Update the descriptor of the new primary system partition.
+ p.Parttype = PartPrimSys
+
+ if err := descr.setExtra(p); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ descr.ModifiedAt = so.t.Unix()
+
if err := f.writeDescriptors(); err != nil {
return fmt.Errorf("%w", err)
}
- f.h.Arch = getSIFArch(arch)
+ f.h.Arch = p.Arch
f.h.ModifiedAt = so.t.Unix()
if err := f.writeHeader(); err != nil {
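
For orientation, the rewritten SetPrimPart is reached through the public sif API roughly as follows; the image path and descriptor ID are illustrative assumptions:

package main

import (
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Placeholder path; any writable SIF image with a system partition works.
	f, err := sif.LoadContainerFromPath("image.sif")
	if err != nil {
		log.Fatal(err)
	}
	defer f.UnloadContainer()

	// Promote descriptor 2 to the primary system partition; any existing
	// primary partition is demoted to a plain system partition by the code above.
	if err := f.SetPrimPart(2); err != nil {
		log.Fatal(err)
	}
}
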
diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
new file mode 100644
index 0000000000..43de4c9d47
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and set up a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md
new file mode 100644
index 0000000000..3c8d212711
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/README.md
@@ -0,0 +1,25 @@
+# Merkle
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle)
+[![Go Report
+Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle)
+[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle)
+[![Slack
+Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/)
+
+## Overview
+
+This repository contains Go code to help create and manipulate Merkle trees, as
+well as construct and verify various types of proof.
+
+This is the data structure which is used by projects such as
+[Trillian](https://github.com/google/trillian) to provide
+[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log).
+
+
+## Support
+* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
+* Slack: https://gtrillian.slack.com/ (invitation)
+
+
+
diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go
new file mode 100644
index 0000000000..c53a96a4c3
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/compact/nodes.go
@@ -0,0 +1,89 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compact
+
+import "math/bits"
+
+// NodeID identifies a node of a Merkle tree.
+//
+// The ID consists of a level and index within this level. Levels are numbered
+// from 0, which corresponds to the tree leaves. Within each level, nodes are
+// numbered with consecutive indices starting from 0.
+//
+// L4: ┌───────0───────┐ ...
+// L3: ┌───0───┐ ┌───1───┐ ┌─── ...
+// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ...
+// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ...
+// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ...
+//
+// When the tree is not perfect, the nodes that would complement it to perfect
+// are called ephemeral. Algorithms that operate with ephemeral nodes still map
+// them to the same address space.
+type NodeID struct {
+ Level uint
+ Index uint64
+}
+
+// NewNodeID returns a NodeID with the passed in node coordinates.
+func NewNodeID(level uint, index uint64) NodeID {
+ return NodeID{Level: level, Index: index}
+}
+
+// Parent returns the ID of the parent node.
+func (id NodeID) Parent() NodeID {
+ return NewNodeID(id.Level+1, id.Index>>1)
+}
+
+// Sibling returns the ID of the sibling node.
+func (id NodeID) Sibling() NodeID {
+ return NewNodeID(id.Level, id.Index^1)
+}
+
+// Coverage returns the [begin, end) range of leaves covered by the node.
+func (id NodeID) Coverage() (uint64, uint64) {
+ return id.Index << id.Level, (id.Index + 1) << id.Level
+}
+
+// RangeNodes appends the IDs of the nodes that comprise the [begin, end)
+// compact range to the given slice, and returns the new slice. The caller may
+// pre-allocate space with the help of the RangeSize function.
+func RangeNodes(begin, end uint64, ids []NodeID) []NodeID {
+ left, right := Decompose(begin, end)
+
+ pos := begin
+ // Iterate over perfect subtrees along the left border of the range, ordered
+ // from lower to upper levels.
+ for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
+ level := uint(bits.TrailingZeros64(left))
+ bit = uint64(1) << level
+ ids = append(ids, NewNodeID(level, pos>>level))
+ }
+
+ // Iterate over perfect subtrees along the right border of the range, ordered
+ // from upper to lower levels.
+ for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
+ level := uint(bits.Len64(right)) - 1
+ bit = uint64(1) << level
+ ids = append(ids, NewNodeID(level, pos>>level))
+ }
+
+ return ids
+}
+
+// RangeSize returns the number of nodes in the [begin, end) compact range.
+func RangeSize(begin, end uint64) int {
+ left, right := Decompose(begin, end)
+ return bits.OnesCount64(left) + bits.OnesCount64(right)
+}
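
A small worked example of the addressing scheme and RangeNodes, assuming the package is imported as vendored here; for leaves [2, 9) the left border decomposes into subtrees of size 2 and 4, and the right border into a single leaf:

package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/compact"
)

func main() {
	leaf := compact.NewNodeID(0, 5)
	fmt.Println(leaf.Parent())  // {1 2}
	fmt.Println(leaf.Sibling()) // {0 4}

	// Nodes covering the leaf range [2, 9): sizes 2 and 4 on the left, 1 on the right.
	ids := compact.RangeNodes(2, 9, nil)
	fmt.Println(compact.RangeSize(2, 9), ids) // 3 [{1 1} {2 1} {0 8}]
}
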
diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go
new file mode 100644
index 0000000000..a34c0be973
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/compact/range.go
@@ -0,0 +1,264 @@
+// Copyright 2019 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compact provides compact Merkle tree data structures.
+package compact
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+)
+
+// HashFn computes an internal node's hash using the hashes of its child nodes.
+type HashFn func(left, right []byte) []byte
+
+// VisitFn visits the node with the specified ID and hash.
+type VisitFn func(id NodeID, hash []byte)
+
+// RangeFactory allows creating compact ranges with the specified hash
+// function, which must not be nil, and must not be changed.
+type RangeFactory struct {
+ Hash HashFn
+}
+
+// NewRange creates a Range for [begin, end) with the given set of hashes. The
+// hashes correspond to the roots of the minimal set of perfect sub-trees
+// covering the [begin, end) leaves range, ordered left to right.
+func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) {
+ if end < begin {
+ return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin)
+ }
+ if got, want := len(hashes), RangeSize(begin, end); got != want {
+ return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want)
+ }
+ return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil
+}
+
+// NewEmptyRange returns a new Range for an empty [begin, begin) range. The
+// value of begin defines where the range will start growing from when entries
+// are appended to it.
+func (f *RangeFactory) NewEmptyRange(begin uint64) *Range {
+ return &Range{f: f, begin: begin, end: begin}
+}
+
+// Range represents a compact Merkle tree range for leaf indices [begin, end).
+//
+// It contains the minimal set of perfect subtrees whose leaves comprise this
+// range. The structure is efficiently mergeable with other compact ranges that
+// share one of the endpoints with it.
+//
+// For more details, see
+// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md.
+type Range struct {
+ f *RangeFactory
+ begin uint64
+ end uint64
+ hashes [][]byte
+}
+
+// Begin returns the first index covered by the range (inclusive).
+func (r *Range) Begin() uint64 {
+ return r.begin
+}
+
+// End returns the last index covered by the range (exclusive).
+func (r *Range) End() uint64 {
+ return r.end
+}
+
+// Hashes returns sub-tree hashes corresponding to the minimal set of perfect
+// sub-trees covering the [begin, end) range, ordered left to right.
+func (r *Range) Hashes() [][]byte {
+ return r.hashes
+}
+
+// Append extends the compact range by appending the passed in hash to it. It
+// reports all the added nodes through the visitor function (if non-nil).
+func (r *Range) Append(hash []byte, visitor VisitFn) error {
+ if visitor != nil {
+ visitor(NewNodeID(0, r.end), hash)
+ }
+ return r.appendImpl(r.end+1, hash, nil, visitor)
+}
+
+// AppendRange extends the compact range by merging in the other compact range
+// from the right. It uses the tree hasher to calculate hashes of newly created
+// nodes, and reports them through the visitor function (if non-nil).
+func (r *Range) AppendRange(other *Range, visitor VisitFn) error {
+ if other.f != r.f {
+ return errors.New("incompatible ranges")
+ }
+ if got, want := other.begin, r.end; got != want {
+ return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want)
+ }
+ if len(other.hashes) == 0 { // The other range is empty, merging is trivial.
+ return nil
+ }
+ return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor)
+}
+
+// GetRootHash returns the root hash of the Merkle tree represented by this
+// compact range. Requires the range to start at index 0. If the range is
+// empty, returns nil.
+//
+// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the
+// ones rooting imperfect subtrees) along the right border of the tree.
+func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) {
+ if r.begin != 0 {
+ return nil, fmt.Errorf("begin=%d, want 0", r.begin)
+ }
+ ln := len(r.hashes)
+ if ln == 0 {
+ return nil, nil
+ }
+ hash := r.hashes[ln-1]
+ // All non-perfect subtree hashes along the right border of the tree
+ // correspond to the parents of all perfect subtree nodes except the lowest
+ // one (therefore the loop skips it).
+ for i, size := ln-2, r.end; i >= 0; i-- {
+ hash = r.f.Hash(r.hashes[i], hash)
+ if visitor != nil {
+ size &= size - 1 // Delete the previous node.
+ level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level.
+ index := size >> level // And its horizontal index.
+ visitor(NewNodeID(level, index), hash)
+ }
+ }
+ return hash, nil
+}
+
+// Equal compares two Ranges for equality.
+func (r *Range) Equal(other *Range) bool {
+ if r.f != other.f || r.begin != other.begin || r.end != other.end {
+ return false
+ }
+ if len(r.hashes) != len(other.hashes) {
+ return false
+ }
+ for i := range r.hashes {
+ if !bytes.Equal(r.hashes[i], other.hashes[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// appendImpl extends the compact range by merging the [r.end, end) compact
+// range into it. The other compact range is decomposed into a seed hash and
+// all the other hashes (possibly none). The method uses the tree hasher to
+// calculate hashes of newly created nodes, and reports them through the
+// visitor function (if non-nil).
+func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error {
+ // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node
+ // merges that transforms the two compact ranges into one.
+ low, high := getMergePath(r.begin, r.end, end)
+ if high < low {
+ high = low
+ }
+ index := r.end >> low
+ // Now bits [0, high-low) of index encode the merge path.
+
+ // The number of one bits in index is the number of nodes from the left range
+ // that will be merged, and zero bits correspond to the nodes in the right
+ // range. Below we make sure that both ranges have enough hashes, which can
+ // be false only in case the data is corrupted in some way.
+ ones := bits.OnesCount64(index & (1<<(high-low) - 1))
+ if ln := len(r.hashes); ln < ones {
+ return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones)
+ }
+ if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros {
+ return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1)
+ }
+
+ // Some of the trailing nodes of the left compact range, and some of the
+ // leading nodes of the right range, are sequentially merged with the seed,
+ // according to the mask. All new nodes are reported through the visitor.
+ idx1, idx2 := len(r.hashes), 0
+ for h := low; h < high; h++ {
+ if index&1 == 0 {
+ seed = r.f.Hash(seed, hashes[idx2])
+ idx2++
+ } else {
+ idx1--
+ seed = r.f.Hash(r.hashes[idx1], seed)
+ }
+ index >>= 1
+ if visitor != nil {
+ visitor(NewNodeID(h+1, index), seed)
+ }
+ }
+
+ // All nodes from both ranges that have not been merged are bundled together
+ // with the "merged" seed node.
+ r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
+ r.end = end
+ return nil
+}
+
+// getMergePath returns the merging path between the compact range [begin, mid)
+// and [mid, end). The path is represented as a range of bits within mid, with
+// bit indices [low, high). A bit value of 1 on level i of mid means that the
+// node on this level merges with the corresponding node in the left compact
+// range, whereas 0 represents merging with the right compact range. If the
+// path is empty then high <= low.
+//
+// The output is not specified if begin <= mid <= end doesn't hold, but the
+// function never panics.
+func getMergePath(begin, mid, end uint64) (uint, uint) {
+ low := bits.TrailingZeros64(mid)
+ high := 64
+ if begin != 0 {
+ high = bits.Len64(mid ^ (begin - 1))
+ }
+ if high2 := bits.Len64((mid - 1) ^ end); high2 < high {
+ high = high2
+ }
+ return uint(low), uint(high - 1)
+}
+
+// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
+// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
+// some integers m, k >= 0.
+//
+// The sequence of sizes is returned encoded as bitmasks left and right, where:
+// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
+// - left mask bits in LSB-to-MSB order encode the left part of the sequence
+// - right mask bits in MSB-to-LSB order encode the right part
+//
+// The corresponding values of m are not returned (they can be calculated from
+// begin and the sub-range sizes).
+//
+// For example, (begin, end) values of (0b110, 0b11101) would indicate a
+// sequence of tree sizes: 2,8; 8,4,1.
+//
+// The output is not specified if begin > end, but the function never panics.
+func Decompose(begin, end uint64) (uint64, uint64) {
+ // Special case, as the code below works only if begin != 0, or end < 2^63.
+ if begin == 0 {
+ return 0, end
+ }
+ xbegin := begin - 1
+ // Find where paths to leaves #begin-1 and #end diverge, and mask the upper
+ // bits away, as only the nodes strictly below this point are in the range.
+ d := bits.Len64(xbegin^end) - 1
+ mask := uint64(1)<<uint(d) - 1
+ return ^xbegin & mask, end & mask
+}
diff --git a/vendor/github.com/transparency-dev/merkle/proof/proof.go b/vendor/github.com/transparency-dev/merkle/proof/proof.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/proof/proof.go
+// Package proof contains helpers for constructing log Merkle tree proofs.
+package proof
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/transparency-dev/merkle/compact"
+)
+
+// Nodes contains information on how to construct a log Merkle tree hash, given
+// a set of node hashes.
+type Nodes struct {
+ // IDs contains the IDs of the nodes needed to be fetched.
+ IDs []compact.NodeID
+ // begin and end delimit the slice of IDs which participates in the rehashing
+ // of the ephemeral node.
+ begin int
+ end int
+ // ephem is the ID of the ephemeral node in the proof.
+ ephem compact.NodeID
+}
+
+// Inclusion returns the information on how to fetch and construct an
+// inclusion proof for the given leaf index in a log Merkle tree of the given
+// size. It requires 0 <= index < size.
+func Inclusion(index, size uint64) (Nodes, error) {
+ if index >= size {
+ return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size)
+ }
+ return nodes(index, 0, size).skipFirst(), nil
+}
+
+// Consistency returns the information on how to fetch and construct a
+// consistency proof between the two given tree sizes of a log Merkle tree. It
+// requires 0 <= size1 <= size2.
+func Consistency(size1, size2 uint64) (Nodes, error) {
+ if size1 > size2 {
+ return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
+ }
+ if size1 == size2 || size1 == 0 {
+ return Nodes{IDs: []compact.NodeID{}}, nil
+ }
+
+ // Find the root of the biggest perfect subtree that ends at size1.
+ level := uint(bits.TrailingZeros64(size1))
+ index := (size1 - 1) >> level
+ // The consistency proof consists of this node (except if size1 is a power of
+ // two, in which case adding this node would be redundant because the client
+ // is assumed to know it from a checkpoint), and nodes of the inclusion proof
+ // into this node in the tree of size2.
+ p := nodes(index, level, size2)
+
+ // Handle the case when size1 is a power of 2.
+ if index == 0 {
+ return p.skipFirst(), nil
+ }
+ return p, nil
+}
+
+// nodes returns the node IDs necessary to prove that the (level, index) node
+// is included in the Merkle tree of the given size.
+func nodes(index uint64, level uint, size uint64) Nodes {
+ // Compute the `fork` node, where the path from root to (level, index) node
+ // diverges from the path to (0, size).
+ //
+ // The sibling of this node is the ephemeral node which represents a subtree
+ // that is not complete in the tree of the given size. To compute the hash
+ // of the ephemeral node, we need all the non-ephemeral nodes that cover the
+ // same range of leaves.
+ //
+ // The `inner` variable is how many layers up from (level, index) the `fork`
+ // and the ephemeral nodes are.
+ inner := bits.Len64(index^(size>>level)) - 1
+ fork := compact.NewNodeID(level+uint(inner), index>>inner)
+
+ begin, end := fork.Coverage()
+ left := compact.RangeSize(0, begin)
+ right := compact.RangeSize(end, size)
+
+ node := compact.NewNodeID(level, index)
+ // Pre-allocate the exact number of nodes for the proof, in order:
+ // - The seed node for which we are building the proof.
+ // - The `inner` nodes at each level up to the fork node.
+ // - The `right` nodes, comprising the ephemeral node.
+ // - The `left` nodes, completing the coverage of the whole [0, size) range.
+ nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node)
+
+ // The first portion of the proof consists of the siblings for nodes of the
+ // path going up to the level at which the ephemeral node appears.
+ for ; node.Level < fork.Level; node = node.Parent() {
+ nodes = append(nodes, node.Sibling())
+ }
+ // This portion of the proof covers the range [begin, end) under it. The
+ // ranges to the left and to the right from it remain to be covered.
+
+ // Add all the nodes (potentially none) that cover the right range, and
+ // represent the ephemeral node. Reverse them so that the Rehash method can
+ // process hashes in the convenient order, from lower to upper levels.
+ len1 := len(nodes)
+ nodes = compact.RangeNodes(end, size, nodes)
+ reverse(nodes[len(nodes)-right:])
+ len2 := len(nodes)
+ // Add the nodes that cover the left range, ordered increasingly by level.
+ nodes = compact.RangeNodes(0, begin, nodes)
+ reverse(nodes[len(nodes)-left:])
+
+ // nodes[len1:len2] contains the nodes representing the ephemeral node. If
+ // it's empty, make it zero. Note that it can also contain a single node.
+ // Depending on the preference of the layer above, it may or may not be
+ // considered ephemeral.
+ if len1 >= len2 {
+ len1, len2 = 0, 0
+ }
+
+ return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()}
+}
+
+// Ephem returns the ephemeral node, and indices begin and end, such that
+// IDs[begin:end] slice contains the child nodes of the ephemeral node.
+//
+// The list is empty iff there are no ephemeral nodes in the proof. Some
+// examples of when this can happen: a proof in a perfect tree; an inclusion
+// proof for a leaf in a perfect subtree at the right edge of the tree.
+func (n Nodes) Ephem() (compact.NodeID, int, int) {
+ return n.ephem, n.begin, n.end
+}
+
+// Rehash computes the proof based on the slice of node hashes corresponding to
+// their IDs in the n.IDs field. The slices must be of the same length. The hc
+// parameter computes a node's hash based on hashes of its children.
+//
+// Warning: The passed-in slice of hashes can be modified in-place.
+func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) {
+ if got, want := len(h), len(n.IDs); got != want {
+ return nil, fmt.Errorf("got %d hashes but expected %d", got, want)
+ }
+ cursor := 0
+ // Scan the list of node hashes, and store the rehashed list in-place.
+ // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the
+ // rehashed list after scanning h up to index i-1.
+ for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 {
+ hash := h[i]
+ if i >= n.begin && i < n.end {
+ // Scan the block of node hashes that need rehashing.
+ for i++; i < n.end; i++ {
+ hash = hc(h[i], hash)
+ }
+ i--
+ }
+ h[cursor] = hash
+ }
+ return h[:cursor], nil
+}
+
+func (n Nodes) skipFirst() Nodes {
+ n.IDs = n.IDs[1:]
+ // Fixup the indices into the IDs slice.
+ if n.begin < n.end {
+ n.begin--
+ n.end--
+ }
+ return n
+}
+
+func reverse(ids []compact.NodeID) {
+ for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
+ ids[i], ids[j] = ids[j], ids[i]
+ }
+}
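
A sketch of how a caller might use Inclusion to learn which node hashes to fetch from storage; the tree size and leaf index are arbitrary example values:

package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/proof"
)

func main() {
	// Inclusion proof plan for leaf 2 in a tree of size 7.
	n, err := proof.Inclusion(2, 7)
	if err != nil {
		panic(err)
	}
	fmt.Println(n.IDs)     // node IDs to fetch from storage
	fmt.Println(n.Ephem()) // ephemeral node and the IDs[begin:end] window it is rehashed from
}
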
diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go
new file mode 100644
index 0000000000..d42e1afe36
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/proof/verify.go
@@ -0,0 +1,176 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proof
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+
+ "github.com/transparency-dev/merkle"
+)
+
+// RootMismatchError occurs when an inclusion proof fails.
+type RootMismatchError struct {
+ ExpectedRoot []byte
+ CalculatedRoot []byte
+}
+
+func (e RootMismatchError) Error() string {
+ return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
+}
+
+func verifyMatch(calculated, expected []byte) error {
+ if !bytes.Equal(calculated, expected) {
+ return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated}
+ }
+ return nil
+}
+
+// VerifyInclusion verifies the correctness of the inclusion proof for the leaf
+// with the specified hash and index, relatively to the tree of the given size
+// and root hash. Requires 0 <= index < size.
+func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error {
+ calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof)
+ if err != nil {
+ return err
+ }
+ return verifyMatch(calcRoot, root)
+}
+
+// RootFromInclusionProof calculates the expected root hash for a tree of the
+// given size, provided a leaf index and hash with the corresponding inclusion
+// proof. Requires 0 <= index < size.
+func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
+ if index >= size {
+ return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
+ }
+ if got, want := len(leafHash), hasher.Size(); got != want {
+ return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
+ }
+
+ inner, border := decompInclProof(index, size)
+ if got, want := len(proof), inner+border; got != want {
+ return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
+ }
+
+ res := chainInner(hasher, leafHash, proof[:inner], index)
+ res = chainBorderRight(hasher, res, proof[inner:])
+ return res, nil
+}
+
+// VerifyConsistency checks that the passed-in consistency proof is valid
+// between the passed in tree sizes, with respect to the corresponding root
+// hashes. Requires 0 <= size1 <= size2.
+func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
+ switch {
+ case size2 < size1:
+ return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2)
+ case size1 == size2:
+ if len(proof) > 0 {
+ return errors.New("size1=size2, but proof is not empty")
+ }
+ return verifyMatch(root1, root2)
+ case size1 == 0:
+ // Any size greater than 0 is consistent with size 0.
+ if len(proof) > 0 {
+ return fmt.Errorf("expected empty proof, but got %d components", len(proof))
+ }
+ return nil // Proof OK.
+ case len(proof) == 0:
+ return errors.New("empty proof")
+ }
+
+ inner, border := decompInclProof(size1-1, size2)
+ shift := bits.TrailingZeros64(size1)
+ inner -= shift // Note: shift < inner if size1 < size2.
+
+ // The proof includes the root hash for the sub-tree of size 2^shift.
+ seed, start := proof[0], 1
+ if size1 == 1<<uint(shift) {
+ // The size1 tree is balanced, nothing to verify for root1.
+ seed, start = root1, 0
+ }
+ if got, want := len(proof), start+inner+border; got != want {
+ return fmt.Errorf("wrong proof size %d, want %d", got, want)
+ }
+ proof = proof[start:]
+
+ mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
+ hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
+ hash1 = chainBorderRight(hasher, hash1, proof[inner:])
+ if err := verifyMatch(hash1, root1); err != nil {
+ return err
+ }
+
+ // Verify the second root.
+ hash2 := chainInner(hasher, seed, proof[:inner], mask)
+ hash2 = chainBorderRight(hasher, hash2, proof[inner:])
+ return verifyMatch(hash2, root2)
+}
+
+// decompInclProof breaks down inclusion proof for a leaf at the specified
+// |index| in a tree of the specified |size| into 2 components. The splitting
+// point between them is where paths to leaves |index| and |size-1| diverge.
+// Returns lengths of the bottom and upper proof parts correspondingly. The sum
+// of the two determines the correct length of the inclusion proof.
+func decompInclProof(index, size uint64) (int, int) {
+ inner := innerProofSize(index, size)
+ border := bits.OnesCount64(index >> uint(inner))
+ return inner, border
+}
+
+func innerProofSize(index, size uint64) int {
+ return bits.Len64(index ^ (size - 1))
+}
+
+// chainInner computes a subtree hash for a node on or below the tree's right
+// border. Assumes |proof| hashes are ordered from lower levels to upper, and
+// |seed| is the initial subtree/leaf hash on the path located at the specified
+// |index| on its level.
+func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+ for i, h := range proof {
+ if (index>>uint(i))&1 == 0 {
+ seed = hasher.HashChildren(seed, h)
+ } else {
+ seed = hasher.HashChildren(h, seed)
+ }
+ }
+ return seed
+}
+
+// chainInnerRight computes a subtree hash like chainInner, but only takes
+// hashes to the left from the path into consideration, which effectively means
+// the result is a hash of the corresponding earlier version of this subtree.
+func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
+ for i, h := range proof {
+ if (index>>uint(i))&1 == 1 {
+ seed = hasher.HashChildren(h, seed)
+ }
+ }
+ return seed
+}
+
+// chainBorderRight chains proof hashes along tree borders. This differs from
+// inner chaining because |proof| contains only left-side subtree hashes.
+func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte {
+ for _, h := range proof {
+ seed = hasher.HashChildren(h, seed)
+ }
+ return seed
+}
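
A hand-checkable sketch of VerifyInclusion on a two-leaf tree, using the rfc6962 hasher vendored below; in a size-2 tree the inclusion proof for leaf 0 is just the sibling leaf hash:

package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher
	leaf0 := h.HashLeaf([]byte("a"))
	leaf1 := h.HashLeaf([]byte("b"))
	root := h.HashChildren(leaf0, leaf1)

	// The proof for leaf 0 consists of a single hash: its sibling leaf.
	err := proof.VerifyInclusion(h, 0, 2, leaf0, [][]byte{leaf1}, root)
	fmt.Println(err) // <nil>
}
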
diff --git a/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
new file mode 100644
index 0000000000..b04f952ef8
--- /dev/null
+++ b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go
@@ -0,0 +1,68 @@
+// Copyright 2016 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rfc6962 provides hashing functionality according to RFC6962.
+package rfc6962
+
+import (
+ "crypto"
+ _ "crypto/sha256" // SHA256 is the default algorithm.
+)
+
+// Domain separation prefixes
+const (
+ RFC6962LeafHashPrefix = 0
+ RFC6962NodeHashPrefix = 1
+)
+
+// DefaultHasher is a SHA256 based LogHasher.
+var DefaultHasher = New(crypto.SHA256)
+
+// Hasher implements the RFC6962 tree hashing algorithm.
+type Hasher struct {
+ crypto.Hash
+}
+
+// New creates a new Hashers.LogHasher on the passed in hash function.
+func New(h crypto.Hash) *Hasher {
+ return &Hasher{Hash: h}
+}
+
+// EmptyRoot returns a special case for an empty tree.
+func (t *Hasher) EmptyRoot() []byte {
+ return t.New().Sum(nil)
+}
+
+// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
+// The data in leaf is prefixed by the LeafHashPrefix.
+func (t *Hasher) HashLeaf(leaf []byte) []byte {
+ h := t.New()
+ h.Write([]byte{RFC6962LeafHashPrefix})
+ h.Write(leaf)
+ return h.Sum(nil)
+}
+
+// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
+// The hashed structure is NodeHashPrefix||l||r.
+func (t *Hasher) HashChildren(l, r []byte) []byte {
+ h := t.New()
+ b := append(append(append(
+ make([]byte, 0, 1+len(l)+len(r)),
+ RFC6962NodeHashPrefix),
+ l...),
+ r...)
+
+ h.Write(b)
+ return h.Sum(nil)
+}
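
A quick sketch of the domain separation this file implements: leaf hashes are SHA-256 over 0x00||data, node hashes over 0x01||left||right, and the empty root is SHA-256 of the empty string:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher

	// SHA-256 of the empty string.
	fmt.Println(hex.EncodeToString(h.EmptyRoot()))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855

	// HashLeaf is SHA-256 over the leaf prefix byte followed by the data.
	leaf := h.HashLeaf([]byte("hello"))
	manual := sha256.Sum256(append([]byte{rfc6962.RFC6962LeafHashPrefix}, []byte("hello")...))
	fmt.Println(hex.EncodeToString(leaf) == hex.EncodeToString(manual[:])) // true
}
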
diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md
index d5f7872766..09825ca086 100644
--- a/vendor/github.com/vbauerster/mpb/v8/README.md
+++ b/vendor/github.com/vbauerster/mpb/v8/README.md
@@ -82,8 +82,8 @@ func main() {
mpb.AppendDecorators(
// replace ETA decorator with "done" message, OnComplete event
decor.OnComplete(
- // ETA decorator with ewma age of 60
- decor.EwmaETA(decor.ET_STYLE_GO, 60, decor.WCSyncWidth), "done",
+ // ETA decorator with ewma age of 30
+ decor.EwmaETA(decor.ET_STYLE_GO, 30, decor.WCSyncWidth), "done",
),
),
)
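
A sketch of the decorator unit style touched by the counters.go change below; passing SizeB1024(0) (or using the CountersKibiByte wrapper) selects binary-prefixed output, and the totals and sleep interval here are arbitrary:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	total := int64(64 * 1024 * 1024)
	bar := p.AddBar(total,
		// "% d / % d" renders e.g. "8.0 MiB / 64.0 MiB" with the KiB/MiB unit scale.
		mpb.PrependDecorators(decor.Counters(decor.SizeB1024(0), "% d / % d")),
		mpb.AppendDecorators(decor.Percentage()),
	)

	for done := int64(0); done < total; done += 8 * 1024 * 1024 {
		time.Sleep(50 * time.Millisecond)
		bar.IncrInt64(8 * 1024 * 1024)
	}
	p.Wait()
}
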
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go
index 331c3df674..0420275783 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/counters.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go
@@ -2,13 +2,6 @@ package decor
import (
"fmt"
- "strings"
-)
-
-const (
- _ = iota
- UnitKiB
- UnitKB
)
// CountersNoUnit is a wrapper around Counters with no unit param.
@@ -17,54 +10,60 @@ func CountersNoUnit(pairFmt string, wcc ...WC) Decorator {
}
// CountersKibiByte is a wrapper around Counters with predefined unit
-// UnitKiB (bytes/1024).
+// as SizeB1024(0).
func CountersKibiByte(pairFmt string, wcc ...WC) Decorator {
- return Counters(UnitKiB, pairFmt, wcc...)
+ return Counters(SizeB1024(0), pairFmt, wcc...)
}
// CountersKiloByte is a wrapper around Counters with predefined unit
-// UnitKB (bytes/1000).
+// as SizeB1000(0).
func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
- return Counters(UnitKB, pairFmt, wcc...)
+ return Counters(SizeB1000(0), pairFmt, wcc...)
}
// Counters decorator with dynamic unit measure adjustment.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
-// `pairFmt` printf compatible verbs for current and total pair
+// `pairFmt` printf compatible verbs for current and total
//
// `wcc` optional WC config
//
-// pairFmt example if unit=UnitKB:
+// pairFmt example if unit=SizeB1000(0):
//
-// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB"
-// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB"
// pairFmt="%d / %d" output: "1MB / 12MB"
// pairFmt="% d / % d" output: "1 MB / 12 MB"
-func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
- producer := func(unit int, pairFmt string) DecorFunc {
- if pairFmt == "" {
- pairFmt = "%d / %d"
- } else if strings.Count(pairFmt, "%") != 2 {
- panic("expected pairFmt with exactly 2 verbs")
- }
- switch unit {
- case UnitKiB:
+// pairFmt="%.1f / %.1f" output: "1.0MB / 12.0MB"
+// pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB"
+// pairFmt="%f / %f" output: "1.000000MB / 12.000000MB"
+// pairFmt="% f / % f" output: "1.000000 MB / 12.000000 MB"
+func Counters(unit interface{}, pairFmt string, wcc ...WC) Decorator {
+ producer := func() DecorFunc {
+ switch unit.(type) {
+ case SizeB1024:
+ if pairFmt == "" {
+ pairFmt = "% d / % d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(pairFmt, SizeB1024(s.Current), SizeB1024(s.Total))
}
- case UnitKB:
+ case SizeB1000:
+ if pairFmt == "" {
+ pairFmt = "% d / % d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(pairFmt, SizeB1000(s.Current), SizeB1000(s.Total))
}
default:
+ if pairFmt == "" {
+ pairFmt = "%d / %d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(pairFmt, s.Current, s.Total)
}
}
}
- return Any(producer(unit, pairFmt), wcc...)
+ return Any(producer(), wcc...)
}
// TotalNoUnit is a wrapper around Total with no unit param.
@@ -73,55 +72,60 @@ func TotalNoUnit(format string, wcc ...WC) Decorator {
}
// TotalKibiByte is a wrapper around Total with predefined unit
-// UnitKiB (bytes/1024).
+// as SizeB1024(0).
func TotalKibiByte(format string, wcc ...WC) Decorator {
- return Total(UnitKiB, format, wcc...)
+ return Total(SizeB1024(0), format, wcc...)
}
// TotalKiloByte is a wrapper around Total with predefined unit
-// UnitKB (bytes/1000).
+// as SizeB1000(0).
func TotalKiloByte(format string, wcc ...WC) Decorator {
- return Total(UnitKB, format, wcc...)
+ return Total(SizeB1000(0), format, wcc...)
}
// Total decorator with dynamic unit measure adjustment.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
// `format` printf compatible verb for Total
//
// `wcc` optional WC config
//
-// format example if unit=UnitKiB:
+// format example if unit=SizeB1024(0):
//
-// format="%.1f" output: "12.0MiB"
-// format="% .1f" output: "12.0 MiB"
// format="%d" output: "12MiB"
// format="% d" output: "12 MiB"
-func Total(unit int, format string, wcc ...WC) Decorator {
- producer := func(unit int, format string) DecorFunc {
- if format == "" {
- format = "%d"
- } else if strings.Count(format, "%") != 1 {
- panic("expected format with exactly 1 verb")
- }
-
- switch unit {
- case UnitKiB:
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%f" output: "12.000000MiB"
+// format="% f" output: "12.000000 MiB"
+func Total(unit interface{}, format string, wcc ...WC) Decorator {
+ producer := func() DecorFunc {
+ switch unit.(type) {
+ case SizeB1024:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1024(s.Total))
}
- case UnitKB:
+ case SizeB1000:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1000(s.Total))
}
default:
+ if format == "" {
+ format = "%d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, s.Total)
}
}
}
- return Any(producer(unit, format), wcc...)
+ return Any(producer(), wcc...)
}
// CurrentNoUnit is a wrapper around Current with no unit param.
@@ -130,55 +134,60 @@ func CurrentNoUnit(format string, wcc ...WC) Decorator {
}
// CurrentKibiByte is a wrapper around Current with predefined unit
-// UnitKiB (bytes/1024).
+// as SizeB1024(0).
func CurrentKibiByte(format string, wcc ...WC) Decorator {
- return Current(UnitKiB, format, wcc...)
+ return Current(SizeB1024(0), format, wcc...)
}
// CurrentKiloByte is a wrapper around Current with predefined unit
-// UnitKB (bytes/1000).
+// as SizeB1000(0).
func CurrentKiloByte(format string, wcc ...WC) Decorator {
- return Current(UnitKB, format, wcc...)
+ return Current(SizeB1000(0), format, wcc...)
}
// Current decorator with dynamic unit measure adjustment.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
// `format` printf compatible verb for Current
//
// `wcc` optional WC config
//
-// format example if unit=UnitKiB:
+// format example if unit=SizeB1024(0):
//
-// format="%.1f" output: "12.0MiB"
-// format="% .1f" output: "12.0 MiB"
// format="%d" output: "12MiB"
// format="% d" output: "12 MiB"
-func Current(unit int, format string, wcc ...WC) Decorator {
- producer := func(unit int, format string) DecorFunc {
- if format == "" {
- format = "%d"
- } else if strings.Count(format, "%") != 1 {
- panic("expected format with exactly 1 verb")
- }
-
- switch unit {
- case UnitKiB:
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%f" output: "12.000000MiB"
+// format="% f" output: "12.000000 MiB"
+func Current(unit interface{}, format string, wcc ...WC) Decorator {
+ producer := func() DecorFunc {
+ switch unit.(type) {
+ case SizeB1024:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1024(s.Current))
}
- case UnitKB:
+ case SizeB1000:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1000(s.Current))
}
default:
+ if format == "" {
+ format = "%d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, s.Current)
}
}
}
- return Any(producer(unit, format), wcc...)
+ return Any(producer(), wcc...)
}
// InvertedCurrentNoUnit is a wrapper around InvertedCurrent with no unit param.
@@ -187,53 +196,58 @@ func InvertedCurrentNoUnit(format string, wcc ...WC) Decorator {
}
// InvertedCurrentKibiByte is a wrapper around InvertedCurrent with predefined unit
-// UnitKiB (bytes/1024).
+// as SizeB1024(0).
func InvertedCurrentKibiByte(format string, wcc ...WC) Decorator {
- return InvertedCurrent(UnitKiB, format, wcc...)
+ return InvertedCurrent(SizeB1024(0), format, wcc...)
}
// InvertedCurrentKiloByte is a wrapper around InvertedCurrent with predefined unit
-// UnitKB (bytes/1000).
+// as SizeB1000(0).
func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator {
- return InvertedCurrent(UnitKB, format, wcc...)
+ return InvertedCurrent(SizeB1000(0), format, wcc...)
}
// InvertedCurrent decorator with dynamic unit measure adjustment.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
// `format` printf compatible verb for InvertedCurrent
//
// `wcc` optional WC config
//
-// format example if unit=UnitKiB:
+// format example if unit=SizeB1024(0):
//
-// format="%.1f" output: "12.0MiB"
-// format="% .1f" output: "12.0 MiB"
// format="%d" output: "12MiB"
// format="% d" output: "12 MiB"
-func InvertedCurrent(unit int, format string, wcc ...WC) Decorator {
- producer := func(unit int, format string) DecorFunc {
- if format == "" {
- format = "%d"
- } else if strings.Count(format, "%") != 1 {
- panic("expected format with exactly 1 verb")
- }
-
- switch unit {
- case UnitKiB:
+// format="%.1f" output: "12.0MiB"
+// format="% .1f" output: "12.0 MiB"
+// format="%f" output: "12.000000MiB"
+// format="% f" output: "12.000000 MiB"
+func InvertedCurrent(unit interface{}, format string, wcc ...WC) Decorator {
+ producer := func() DecorFunc {
+ switch unit.(type) {
+ case SizeB1024:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1024(s.Total-s.Current))
}
- case UnitKB:
+ case SizeB1000:
+ if format == "" {
+ format = "% d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, SizeB1000(s.Total-s.Current))
}
default:
+ if format == "" {
+ format = "%d"
+ }
return func(s Statistics) string {
return fmt.Sprintf(format, s.Total-s.Current)
}
}
}
- return Any(producer(unit, format), wcc...)
+ return Any(producer(), wcc...)
}
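For callers, the move from the removed `UnitKiB`/`UnitKB` constants to the typed `SizeB1024(0)`/`SizeB1000(0)` units is invisible when the wrappers are used. A rough usage sketch assuming a typical mpb v8 setup (the total and pacing are made up for illustration):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	total := int64(64 * 1024 * 1024)
	p := mpb.New()
	bar := p.AddBar(total,
		// CountersKibiByte now expands to Counters(SizeB1024(0), ...), so
		// "% .1f / % .1f" renders something like "32.0 MiB / 64.0 MiB".
		mpb.AppendDecorators(decor.CountersKibiByte("% .1f / % .1f")),
	)
	for i := int64(0); i < total; i += 4 << 20 {
		bar.IncrBy(4 << 20)
		time.Sleep(10 * time.Millisecond)
	}
	p.Wait()
}
```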
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go
index 3594e0185b..e33631dabc 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go
@@ -199,8 +199,7 @@ func chooseTimeProducer(style TimeStyle) func(time.Duration) string {
}
default:
return func(remaining time.Duration) string {
- // strip off nanoseconds
- return ((remaining / time.Second) * time.Second).String()
+ return remaining.Truncate(time.Second).String()
}
}
}
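`Duration.Truncate` rounds toward zero to a multiple of its argument, so the rewrite is behaviour-preserving; a quick check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	remaining := 3*time.Second + 250*time.Millisecond

	// Old form: integer-divide by a second, then scale back up.
	old := (remaining / time.Second) * time.Second
	// New form: Truncate rounds toward zero to a multiple of time.Second.
	trunc := remaining.Truncate(time.Second)

	fmt.Println(old, trunc) // 3s 3s
}
```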
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go
index 862ae33d28..65a8d9daec 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go
@@ -34,8 +34,7 @@ type onAbortWrapper struct {
func (d *onAbortWrapper) Decor(s Statistics) string {
if s.Aborted {
- wc := d.GetConf()
- return wc.FormatMsg(d.msg)
+ return d.GetConf().FormatMsg(d.msg)
}
return d.Decorator.Decor(s)
}
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go
index 6ee9268448..0a3897b818 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go
@@ -33,8 +33,7 @@ type onCompleteWrapper struct {
func (d *onCompleteWrapper) Decor(s Statistics) string {
if s.Completed {
- wc := d.GetConf()
- return wc.FormatMsg(d.msg)
+ return d.GetConf().FormatMsg(d.msg)
}
return d.Decorator.Decor(s)
}
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go
index 1d3b3a9e00..9709c196cc 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go
@@ -7,24 +7,25 @@ import (
"github.com/vbauerster/mpb/v8/internal"
)
+var _ fmt.Formatter = percentageType(0)
+
type percentageType float64
func (s percentageType) Format(st fmt.State, verb rune) {
- var prec int
+ prec := -1
switch verb {
- case 'd':
- case 's':
- prec = -1
- default:
+ case 'f', 'e', 'E':
+ prec = 6 // default prec of fmt.Printf("%f|%e|%E")
+ fallthrough
+ case 'b', 'g', 'G', 'x', 'X':
if p, ok := st.Precision(); ok {
prec = p
- } else {
- prec = 6
}
+ default:
+ verb, prec = 'f', 0
}
- p := bytesPool.Get().(*[]byte)
- b := strconv.AppendFloat(*p, float64(s), 'f', prec, 64)
+ b := strconv.AppendFloat(make([]byte, 0, 16), float64(s), byte(verb), prec, 64)
if st.Flag(' ') {
b = append(b, ' ', '%')
} else {
@@ -34,7 +35,6 @@ func (s percentageType) Format(st fmt.State, verb rune) {
if err != nil {
panic(err)
}
- bytesPool.Put(p)
}
// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
@@ -44,12 +44,18 @@ func Percentage(wcc ...WC) Decorator {
// NewPercentage percentage decorator with custom format string.
//
+// `format` printf compatible verb
+//
+// `wcc` optional WC config
+//
// format examples:
//
-// format="%.1f" output: "1.0%"
-// format="% .1f" output: "1.0 %"
// format="%d" output: "1%"
// format="% d" output: "1 %"
+// format="%.1f" output: "1.0%"
+// format="% .1f" output: "1.0 %"
+// format="%f" output: "1.000000%"
+// format="% f" output: "1.000000 %"
func NewPercentage(format string, wcc ...WC) Decorator {
if format == "" {
format = "% d"
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go b/vendor/github.com/vbauerster/mpb/v8/decor/pool.go
deleted file mode 100644
index cefa9bfac2..0000000000
--- a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package decor
-
-import "sync"
-
-var bytesPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 0, 32)
- return &b
- },
-}
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go
index 4df27095f3..d9950b61c9 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go
@@ -8,6 +8,13 @@ import (
//go:generate stringer -type=SizeB1024 -trimprefix=_i
//go:generate stringer -type=SizeB1000 -trimprefix=_
+var (
+ _ fmt.Formatter = SizeB1024(0)
+ _ fmt.Stringer = SizeB1024(0)
+ _ fmt.Formatter = SizeB1000(0)
+ _ fmt.Stringer = SizeB1000(0)
+)
+
const (
_ib SizeB1024 = iota + 1
_iKiB SizeB1024 = 1 << (iota * 10)
@@ -22,17 +29,17 @@ const (
type SizeB1024 int64
func (self SizeB1024) Format(st fmt.State, verb rune) {
- var prec int
+ prec := -1
switch verb {
- case 'd':
- case 's':
- prec = -1
- default:
+ case 'f', 'e', 'E':
+ prec = 6 // default prec of fmt.Printf("%f|%e|%E")
+ fallthrough
+ case 'b', 'g', 'G', 'x', 'X':
if p, ok := st.Precision(); ok {
prec = p
- } else {
- prec = 6
}
+ default:
+ verb, prec = 'f', 0
}
var unit SizeB1024
@@ -49,8 +56,7 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
unit = _iTiB
}
- p := bytesPool.Get().(*[]byte)
- b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64)
+ b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64)
if st.Flag(' ') {
b = append(b, ' ')
}
@@ -59,7 +65,6 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
if err != nil {
panic(err)
}
- bytesPool.Put(p)
}
const (
@@ -76,17 +81,17 @@ const (
type SizeB1000 int64
func (self SizeB1000) Format(st fmt.State, verb rune) {
- var prec int
+ prec := -1
switch verb {
- case 'd':
- case 's':
- prec = -1
- default:
+ case 'f', 'e', 'E':
+ prec = 6 // default prec of fmt.Printf("%f|%e|%E")
+ fallthrough
+ case 'b', 'g', 'G', 'x', 'X':
if p, ok := st.Precision(); ok {
prec = p
- } else {
- prec = 6
}
+ default:
+ verb, prec = 'f', 0
}
var unit SizeB1000
@@ -103,8 +108,7 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
unit = _TB
}
- p := bytesPool.Get().(*[]byte)
- b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64)
+ b := strconv.AppendFloat(make([]byte, 0, 24), float64(self)/float64(unit), byte(verb), prec, 64)
if st.Flag(' ') {
b = append(b, ' ')
}
@@ -113,5 +117,4 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
if err != nil {
panic(err)
}
- bytesPool.Put(p)
}
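Both size types satisfy `fmt.Formatter`, so the new verb handling can be checked directly with `fmt.Printf`; the expected outputs below mirror the doc-comment examples added in this change:

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	n := decor.SizeB1024(12 << 20) // 12 MiB worth of bytes

	fmt.Printf("%d\n", n)    // "12MiB"   (unknown verbs fall back to 'f' with precision 0)
	fmt.Printf("% d\n", n)   // "12 MiB"  (the space flag separates value and unit)
	fmt.Printf("%.1f\n", n)  // "12.0MiB"
	fmt.Printf("%f\n", n)    // "12.000000MiB" (%f keeps fmt's default precision of 6)

	m := decor.SizeB1000(1500000)
	fmt.Printf("% .1f\n", m) // "1.5 MB"
}
```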
diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
index dd0ad70014..d4f644704f 100644
--- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go
@@ -39,7 +39,7 @@ func (self speedFormatter) Format(st fmt.State, verb rune) {
// EwmaSpeed exponential-weighted-moving-average based speed decorator.
// For this decorator to work correctly you have to measure each iteration's
// duration and pass it to one of the (*Bar).EwmaIncr... family methods.
-func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
+func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorator {
var average ewma.MovingAverage
if age == 0 {
average = ewma.NewMovingAverage()
@@ -52,7 +52,7 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
// MovingAverageSpeed decorator relies on MovingAverage implementation
// to calculate its average.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
// `format` printf compatible verb for value, like "%f" or "%d"
//
@@ -62,14 +62,11 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
//
// format examples:
//
-// unit=UnitKiB, format="%.1f" output: "1.0MiB/s"
-// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
-// unit=UnitKB, format="%.1f" output: "1.0MB/s"
-// unit=UnitKB, format="% .1f" output: "1.0 MB/s"
-func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator {
- if format == "" {
- format = "%.0f"
- }
+// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s"
+// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s"
+// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s"
+// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s"
+func MovingAverageSpeed(unit interface{}, format string, average ewma.MovingAverage, wcc ...WC) Decorator {
d := &movingAverageSpeed{
WC: initWC(wcc...),
average: average,
@@ -106,14 +103,14 @@ func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) {
// AverageSpeed decorator with dynamic unit measure adjustment. It's
// a wrapper of NewAverageSpeed.
-func AverageSpeed(unit int, format string, wcc ...WC) Decorator {
+func AverageSpeed(unit interface{}, format string, wcc ...WC) Decorator {
return NewAverageSpeed(unit, format, time.Now(), wcc...)
}
// NewAverageSpeed decorator with dynamic unit measure adjustment and
// user provided start time.
//
-// `unit` one of [0|UnitKiB|UnitKB] zero for no unit
+// `unit` one of [0|SizeB1024(0)|SizeB1000(0)]
//
// `format` printf compatible verb for value, like "%f" or "%d"
//
@@ -123,14 +120,11 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator {
//
// format examples:
//
-// unit=UnitKiB, format="%.1f" output: "1.0MiB/s"
-// unit=UnitKiB, format="% .1f" output: "1.0 MiB/s"
-// unit=UnitKB, format="%.1f" output: "1.0MB/s"
-// unit=UnitKB, format="% .1f" output: "1.0 MB/s"
-func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator {
- if format == "" {
- format = "%.0f"
- }
+// unit=SizeB1024(0), format="%.1f" output: "1.0MiB/s"
+// unit=SizeB1024(0), format="% .1f" output: "1.0 MiB/s"
+// unit=SizeB1000(0), format="%.1f" output: "1.0MB/s"
+// unit=SizeB1000(0), format="% .1f" output: "1.0 MB/s"
+func NewAverageSpeed(unit interface{}, format string, startTime time.Time, wcc ...WC) Decorator {
d := &averageSpeed{
WC: initWC(wcc...),
startTime: startTime,
@@ -151,7 +145,6 @@ func (d *averageSpeed) Decor(s Statistics) string {
speed := float64(s.Current) / float64(time.Since(d.startTime))
d.msg = d.producer(speed * 1e9)
}
-
return d.FormatMsg(d.msg)
}
@@ -159,17 +152,26 @@ func (d *averageSpeed) AverageAdjust(startTime time.Time) {
d.startTime = startTime
}
-func chooseSpeedProducer(unit int, format string) func(float64) string {
- switch unit {
- case UnitKiB:
+func chooseSpeedProducer(unit interface{}, format string) func(float64) string {
+ switch unit.(type) {
+ case SizeB1024:
+ if format == "" {
+ format = "% d"
+ }
return func(speed float64) string {
return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed))))
}
- case UnitKB:
+ case SizeB1000:
+ if format == "" {
+ format = "% d"
+ }
return func(speed float64) string {
return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed))))
}
default:
+ if format == "" {
+ format = "%f"
+ }
return func(speed float64) string {
return fmt.Sprintf(format, speed)
}
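The speed producers lean on the same typed units, with `FmtAsSpeed` appending "/s" to whatever the wrapped size type renders. A quick sketch of the expected output (values chosen for illustration):

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	// Byte rates go through the size types, exactly as chooseSpeedProducer does.
	fmt.Printf("% .1f\n", decor.FmtAsSpeed(decor.SizeB1024(3<<20)))  // "3.0 MiB/s"
	fmt.Printf("%.1f\n", decor.FmtAsSpeed(decor.SizeB1000(2500000))) // "2.5MB/s"

	// With no unit the plain float is formatted; "%f" is now the default verb.
	fmt.Printf("%f\n", 1234.5) // "1234.500000"
}
```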
diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go
index 678dd7c9f3..1b2364f770 100644
--- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go
+++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go
@@ -46,6 +46,7 @@ func (m heapManager) run() {
var sync bool
for req := range m {
+ next:
switch req.cmd {
case h_push:
data := req.data.(pushData)
@@ -78,7 +79,8 @@ func (m heapManager) run() {
select {
case data.iter <- b:
case <-data.drop:
- break
+ close(data.iter)
+ break next
}
}
close(data.iter)
@@ -88,7 +90,8 @@ func (m heapManager) run() {
select {
case data.iter <- heap.Pop(&bHeap).(*Bar):
case <-data.drop:
- break
+ close(data.iter)
+ break next
}
}
close(data.iter)
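A bare `break` inside a `select` exits only the `select`, so the loop would keep spinning through the remaining bars after the consumer signalled `drop`; labelling the `switch` lets the handler close `data.iter` and abandon the whole request in one step. A contrived but runnable sketch of the labelled-break pattern (names invented for illustration):

```go
package main

import "fmt"

// drain mimics the heap manager's request handling: a labelled switch wraps a
// for+select, and "break next" escapes both when the consumer drops out.
func drain(items []int, iter chan<- int, drop <-chan struct{}) {
next:
	switch {
	default:
		for _, it := range items {
			select {
			case iter <- it:
			case <-drop:
				close(iter)
				break next // exits the labelled switch, not just the select
			}
		}
		close(iter)
	}
	fmt.Println("request handled")
}

func main() {
	iter := make(chan int, 1)
	drop := make(chan struct{})
	close(drop) // simulate the consumer going away immediately
	drain([]int{1, 2, 3}, iter, drop)
}
```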
diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go
index 9bb557ee45..cc4e3e1022 100644
--- a/vendor/github.com/vbauerster/mpb/v8/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v8/progress.go
@@ -19,7 +19,7 @@ const (
)
// DoneError represents an error when `*mpb.Progress` is done but its functionality is requested.
-var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil))
+var DoneError = fmt.Errorf("%T instance can't be reused after it's done", (*Progress)(nil))
// Progress represents a container that renders one or more progress bars.
type Progress struct {
@@ -351,7 +351,7 @@ func (s *pState) render(cw *cwriter.Writer) (err error) {
}
func (s *pState) flush(cw *cwriter.Writer, height int) error {
- wg := new(sync.WaitGroup)
+ var wg sync.WaitGroup
defer wg.Wait() // waiting for all s.hm.push to complete
var popCount int
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
index 0b605b3d67..cf29d10a7c 100644
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -7,6 +7,8 @@ Thumbs.db
*.iml
*.so
coverage.*
+go.work
+go.work.sum
gen/
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 0f099f5759..e28904f6ac 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -85,6 +85,8 @@ linters-settings:
- "**/internal/matchers/*.go"
godot:
exclude:
+ # Exclude links.
+ - '^ *\[[^]]+\]:'
# Exclude sentence fragments for lists.
- '^[ ]*[-•]'
# Exclude sentences prefixing a list.
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 32e4812755..40d62fa2eb 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -2,3 +2,5 @@ http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 33609fb510..6a617aba78 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,200 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+ - The `AddConfig` used to hold configuration for addition measurements
+ - `NewAddConfig` used to create a new `AddConfig`
+ - `AddOption` used to configure an `AddConfig`
+ - The `RecordConfig` used to hold configuration for recorded measurements
+ - `NewRecordConfig` used to create a new `RecordConfig`
+ - `RecordOption` used to configure a `RecordConfig`
+ - The `ObserveConfig` used to hold configuration for observed measurements
+ - `NewObserveConfig` used to create a new `ObserveConfig`
+ - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+ They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Int64Counter.Add` method now accepts `...AddOption`
+ - The `Float64Counter.Add` method now accepts `...AddOption`
+ - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Int64Histogram.Record` method now accepts `...RecordOption`
+ - The `Float64Histogram.Record` method now accepts `...RecordOption`
+ - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+ - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+ It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
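Taken together, the instrument changes above mean attributes now travel with each measurement as options rather than as trailing `attribute.KeyValue` arguments. A rough sketch based only on the entries in this section, using the no-op provider so it stays self-contained; exact signatures in the released package may differ slightly:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/instrument"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	ctx := context.Background()

	// noop.NewMeterProvider replaces metric.NewNoopMeterProvider (#3941);
	// it is used here only to keep the sketch self-contained.
	meter := noop.NewMeterProvider().Meter("example/changelog")

	requests, err := meter.Int64Counter("requests")
	if err != nil {
		panic(err)
	}

	// Int64Counter.Add now takes ...AddOption (#3971); WithAttributes turns
	// plain attribute.KeyValue values into such an option.
	requests.Add(ctx, 1,
		instrument.WithAttributes(attribute.String("route", "/healthz")))
}
```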
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` that sets all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+ - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+ - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+ The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+ Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeNameKey` -> `OTelScopeNameKey`
+ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+ - `OtelStatusCodeError` -> `OTelStatusCodeError`
+ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeName` -> `OTelScopeName`
+ - `OtelScopeVersion` -> `OTelScopeVersion`
+ - `OtelLibraryName` -> `OTelLibraryName`
+ - `OtelLibraryVersion` -> `OTelLibraryVersion`
+ - `OtelStatusDescription` -> `OTelStatusDescription`
+- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+ - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted.
+
+### Changed
+
+- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+ This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+ Use the equivalent unit string instead. (#3776)
+ - Use `"1"` instead of `unit.Dimensionless`
+ - Use `"By"` instead of `unit.Bytes`
+ - Use `"ms"` instead of `unit.Milliseconds`
+
## [1.13.0/0.36.0] 2023-02-07
### Added
@@ -62,7 +256,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
- These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586)
+ These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
- `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
- `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
- `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
@@ -85,7 +279,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
### Changed
- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
-- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
- Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
- Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
- Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
@@ -98,7 +292,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557)
-- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name.
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
@@ -207,7 +401,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
-- Reenabled Attribute Filters in the Metric SDK. (#3396)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
@@ -788,7 +982,7 @@ This release includes an API and SDK for the tracing signal that will comply wit
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
-- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)
## [1.0.0-RC2] - 2021-07-26
@@ -1234,7 +1428,7 @@ with major version 0.
- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
- Added documentation about the project's versioning policy. (#1388)
- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
-- Added codeql worfklow to GitHub Actions (#1428)
+- Added codeql workflow to GitHub Actions (#1428)
- Added Gosec workflow to GitHub Actions (#1429)
- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
- Add an OpenCensus exporter bridge. (#1444)
@@ -2077,7 +2271,7 @@ There is still a possibility of breaking changes.
### Fixed
-- Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428)
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
## [0.2.1] - 2020-01-08
@@ -2243,7 +2437,11 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.13.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.15.0...HEAD
+[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
+[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
+[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
@@ -2303,3 +2501,7 @@ It contains api and sdk for trace and meter.
[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index c4012ed6ca..f6f6a313b5 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 9371a481ab..f521d65431 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -6,7 +6,7 @@ OpenTelemetry
repo for information on this and other language SIGs.
See the [public meeting
-notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
@@ -94,30 +94,58 @@ request ID to the entry you added to `CHANGELOG.md`.
### How to Get PRs Merged
-A PR is considered to be **ready to merge** when:
-
-* It has received two approvals from Collaborators/Maintainers (at
- different companies). This is not enforced through technical means
- and a PR may be **ready to merge** with a single approval if the change
- and its approach have been discussed and consensus reached.
-* Feedback has been addressed.
-* Any substantive changes to your PR will require that you clear any prior
- Approval reviews, this includes changes resulting from other feedback. Unless
- the approver explicitly stated that their approval will persist across
- changes it should be assumed that the PR needs their review again. Other
- project members (e.g. approvers, maintainers) can help with this if there are
- any questions or if you forget to clear reviews.
-* It has been open for review for at least one working day. This gives
- people reasonable time to review.
-* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
- one day and may be merged with a single Maintainer's approval.
-* `CHANGELOG.md` has been updated to reflect what has been
- added, changed, removed, or fixed.
-* `README.md` has been updated if necessary.
-* Urgent fix can take exception as long as it has been actively
- communicated.
-
-Any Maintainer can merge the PR once it is **ready to merge**.
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+ This is not enforced through automation, but needs to be validated by the
+ maintainer merging.
+ * The qualified approvals need to be from [Approver]s/[Maintainer]s
+ affiliated with different companies. Two qualified approvals from
+ [Approver]s or [Maintainer]s affiliated with the same company counts as a
+ single qualified approval.
+ * PRs introducing changes that have already been discussed and consensus
+ reached only need one qualified approval. The discussion and resolution
+ needs to be linked to the PR.
+ * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+ * All PR comments and suggestions are resolved.
+ * All GitHub Pull Request reviews with a status of "Request changes" have
+ been addressed. Another review by the objecting reviewer with a different
+ status can be submitted to clear the original review, or the review can be
+ dismissed by a [Maintainer] when the issues from the original review have
+ been addressed.
+ * Any comments or reviews that cannot be resolved between the PR author and
+ reviewers can be submitted to the community [Approver]s and [Maintainer]s
+ during the weekly SIG meeting. If consensus is reached among the
+ [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+ PR may be dismissed or resolved or the PR closed by a [Maintainer].
+ * Any substantive changes to the PR require existing Approval reviews be
+ cleared unless the approver explicitly states that their approval persists
+ across changes. This includes changes resulting from other feedback.
+ [Approver]s and [Maintainer]s can help in clearing reviews and they should
+ be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+ * To ensure this does not block the PR, it should be configured to allow
+ maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+ reasonable time to review.
+ * Trivial changes[^2] do not have to wait for one day and may be merged with
+ a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* Urgent fix can take exception as long as it has been actively communicated
+ among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+ status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+ changes, documentation corrections or updates, dependency updates, etc.
## Design Choices
@@ -216,7 +244,7 @@ Meaning a `config` from one package should not be directly used by another. The
one exception is the API packages. The configs from the base API, eg.
`go.opentelemetry.io/otel/trace.TracerConfig` and
`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
-by the SDK therefor it is expected that these are exported.
+by the SDK therefore it is expected that these are exported.
When a config is exported we want to maintain forward and backward
compatibility, to achieve this no fields should be exported but should
@@ -234,12 +262,12 @@ func newConfig(options ...Option) config {
for _, option := range options {
config = option.apply(config)
}
- // Preform any validation here.
+ // Perform any validation here.
return config
}
```
-If validation of the `config` options is also preformed this can return an
+If validation of the `config` options is also performed this can return an
error as well that is expected to be handled by the instantiation function
or propagated to the user.
@@ -438,7 +466,7 @@ their parameters appropriately named.
#### Interface Stability
All exported stable interfaces that include the following warning in their
-doumentation are allowed to be extended with additional methods.
+documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
@@ -500,27 +528,30 @@ interface that defines the specific functionality should be preferred.
## Approvers and Maintainers
-Approvers:
+### Approvers
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
-- [Josh MacDonald](https://github.com/jmacd), LightStep
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
-Maintainers:
+### Maintainers
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Tyler Yahn](https://github.com/MrAlias), Splunk
-Emeritus:
+### Emeritus
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+- [Josh MacDonald](https://github.com/jmacd), LightStep
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index befb040a77..91ead91b41 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -156,7 +156,7 @@ go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
- && $(GO) mod tidy -compat=1.18
+ && $(GO) mod tidy -compat=1.19
.PHONY: lint-modules
lint-modules: go-mod-tidy
@@ -210,8 +210,9 @@ SEMCONVPKG ?= "semconv/"
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+ $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: prerelease
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 1b2ee21fbf..e138a8a07f 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -14,7 +14,7 @@ It provides a set of APIs to directly measure performance and behavior of your s
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
-| Metrics | Alpha | N/A |
+| Metrics | Beta | N/A |
| Logs | Frozen [1] | N/A |
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
@@ -50,16 +50,16 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
+| Ubuntu | 1.20 | amd64 |
| Ubuntu | 1.19 | amd64 |
-| Ubuntu | 1.18 | amd64 |
+| Ubuntu | 1.20 | 386 |
| Ubuntu | 1.19 | 386 |
-| Ubuntu | 1.18 | 386 |
+| MacOS | 1.20 | amd64 |
| MacOS | 1.19 | amd64 |
-| MacOS | 1.18 | amd64 |
+| Windows | 1.20 | amd64 |
| Windows | 1.19 | amd64 |
-| Windows | 1.18 | amd64 |
+| Windows | 1.20 | 386 |
| Windows | 1.19 | 386 |
-| Windows | 1.18 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 26be598322..b976367e46 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -18,6 +18,7 @@ import (
"encoding/json"
"reflect"
"sort"
+ "sync"
)
type (
@@ -62,6 +63,12 @@ var (
iface: [0]KeyValue{},
},
}
+
+ // sortables is a pool of Sortables used to create Sets when a user does
+ // not provide one.
+ sortables = sync.Pool{
+ New: func() interface{} { return new(Sortable) },
+ }
)
// EmptySet returns a reference to a Set with no elements.
@@ -91,7 +98,7 @@ func (l *Set) Len() int {
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil {
+ if l == nil || !l.equivalent.Valid() {
return KeyValue{}, false
}
value := l.equivalent.reflectValue()
@@ -107,7 +114,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
- if l == nil {
+ if l == nil || !l.equivalent.Valid() {
return Value{}, false
}
rValue := l.equivalent.reflectValue()
@@ -191,7 +198,9 @@ func NewSet(kvs ...KeyValue) Set {
if len(kvs) == 0 {
return empty()
}
- s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
+ srt := sortables.Get().(*Sortable)
+ s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
+ sortables.Put(srt)
return s
}
@@ -218,7 +227,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if len(kvs) == 0 {
return empty(), nil
}
- return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
+ srt := sortables.Get().(*Sortable)
+ s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
+ sortables.Put(srt)
+ return s, filtered
}
// NewSetWithSortableFiltered returns a new Set.
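Pooling the `Sortable` removes a per-call allocation on the `NewSet`/`NewSetWithFiltered` hot path (see the #3832 changelog entries above). A generic sketch of the same borrow-and-return pattern, with made-up names:

```go
package main

import (
	"fmt"
	"sort"
	"sync"
)

// scratch hands out reusable slice buffers so hot paths avoid allocating a
// fresh helper on every call, mirroring the sortables pool above.
var scratch = sync.Pool{
	New: func() interface{} { return new([]string) },
}

func sortedCopy(keys []string) []string {
	buf := scratch.Get().(*[]string)
	*buf = append((*buf)[:0], keys...) // reuse the pooled backing array
	sort.Strings(*buf)
	out := append([]string(nil), *buf...)
	scratch.Put(buf)
	return out
}

func main() {
	fmt.Println(sortedCopy([]string{"b", "c", "a"})) // [a b c]
}
```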
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 34a4e548dd..cb21dd5c09 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -68,7 +68,7 @@ func BoolValue(v bool) Value {
// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
- return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)}
+ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
}
// IntValue creates an INT64 Value.
@@ -99,7 +99,7 @@ func Int64Value(v int64) Value {
// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
- return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)}
+ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
}
// Float64Value creates a FLOAT64 Value.
@@ -112,7 +112,7 @@ func Float64Value(v float64) Value {
// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
- return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)}
+ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
}
// StringValue creates a STRING Value.
@@ -125,7 +125,7 @@ func StringValue(v string) Value {
// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
- return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)}
+ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
}
// Type returns a type of the Value.
@@ -149,7 +149,7 @@ func (v Value) AsBoolSlice() []bool {
}
func (v Value) asBoolSlice() []bool {
- return attribute.AsSlice[bool](v.slice)
+ return attribute.AsBoolSlice(v.slice)
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
@@ -168,7 +168,7 @@ func (v Value) AsInt64Slice() []int64 {
}
func (v Value) asInt64Slice() []int64 {
- return attribute.AsSlice[int64](v.slice)
+ return attribute.AsInt64Slice(v.slice)
}
// AsFloat64 returns the float64 value. Make sure that the Value's
@@ -187,7 +187,7 @@ func (v Value) AsFloat64Slice() []float64 {
}
func (v Value) asFloat64Slice() []float64 {
- return attribute.AsSlice[float64](v.slice)
+ return attribute.AsFloat64Slice(v.slice)
}
// AsString returns the string value. Make sure that the Value's type
@@ -206,7 +206,7 @@ func (v Value) AsStringSlice() []string {
}
func (v Value) asStringSlice() []string {
- return attribute.AsSlice[string](v.slice)
+ return attribute.AsStringSlice(v.slice)
}
type unknownValueType struct{}
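A small usage sketch of the slice-valued attributes touched above; `attribute.StringSlice` and `Value.AsStringSlice` are existing public API, and the key name is made up.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Slice attributes are stored internally as fixed-size arrays
	// (StringSliceValue) so Values stay comparable; AsStringSlice converts back.
	kv := attribute.StringSlice("hosts", []string{"a", "b"})
	fmt.Println(kv.Value.Type())          // STRINGSLICE
	fmt.Println(kv.Value.AsStringSlice()) // [a b]
}
```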
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
index ecd363ab51..4115fe3bbb 100644
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -15,58 +15,16 @@
package otel // import "go.opentelemetry.io/otel"
import (
- "log"
- "os"
- "sync/atomic"
- "unsafe"
+ "go.opentelemetry.io/otel/internal/global"
)
var (
- // globalErrorHandler provides an ErrorHandler that can be used
- // throughout an OpenTelemetry instrumented project. When a user
- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
- // `Handle` and will be delegated to the registered ErrorHandler.
- globalErrorHandler = defaultErrorHandler()
-
- // Compile-time check that delegator implements ErrorHandler.
- _ ErrorHandler = (*delegator)(nil)
- // Compile-time check that errLogger implements ErrorHandler.
- _ ErrorHandler = (*errLogger)(nil)
+ // Compile-time check global.ErrDelegator implements ErrorHandler.
+ _ ErrorHandler = (*global.ErrDelegator)(nil)
+ // Compile-time check global.ErrLogger implements ErrorHandler.
+ _ ErrorHandler = (*global.ErrLogger)(nil)
)
-type delegator struct {
- delegate unsafe.Pointer
-}
-
-func (d *delegator) Handle(err error) {
- d.getDelegate().Handle(err)
-}
-
-func (d *delegator) getDelegate() ErrorHandler {
- return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
-}
-
-// setDelegate sets the ErrorHandler delegate.
-func (d *delegator) setDelegate(eh ErrorHandler) {
- atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
-}
-
-func defaultErrorHandler() *delegator {
- d := &delegator{}
- d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
- return d
-}
-
-// errLogger logs errors if no delegate is set, otherwise they are delegated.
-type errLogger struct {
- l *log.Logger
-}
-
-// Handle logs err if no delegate is set, otherwise it is delegated.
-func (h *errLogger) Handle(err error) {
- h.l.Print(err)
-}
-
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
@@ -76,9 +34,7 @@ func (h *errLogger) Handle(err error) {
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler {
- return globalErrorHandler
-}
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
// SetErrorHandler sets the global ErrorHandler to h.
//
@@ -86,11 +42,7 @@ func GetErrorHandler() ErrorHandler {
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) {
- globalErrorHandler.setDelegate(h)
-}
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) {
- GetErrorHandler().Handle(err)
-}
+func Handle(err error) { global.Handle(err) }
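The public entry points above keep their signatures while delegating to `internal/global`; a minimal sketch of registering a custom handler (the `logHandler` type is illustrative):

```go
package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

// logHandler satisfies otel.ErrorHandler by logging each reported error.
type logHandler struct{}

func (logHandler) Handle(err error) { log.Printf("otel error: %v", err) }

func main() {
	otel.SetErrorHandler(logHandler{})              // routed through internal/global
	otel.Handle(errors.New("exporter unreachable")) // delegated to logHandler
}
```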
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index 2203489447..622c3ee3f2 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -22,24 +22,90 @@ import (
"reflect"
)
-// SliceValue convert a slice into an array with same elements as slice.
-func SliceValue[T bool | int64 | float64 | string](v []T) any {
- var zero T
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+ var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
return cp.Elem().Interface()
}
-// AsSlice convert an array into a slice into with same elements as array.
-func AsSlice[T bool | int64 | float64 | string](v any) []T {
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+ var zero int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+ return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+ var zero float64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+ return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+ var zero string
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+ copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+ return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero bool
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero int64
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ var zero float64
+ correctLen := rv.Len()
+ correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+ cpy := reflect.New(correctType)
+ _ = reflect.Copy(cpy.Elem(), rv)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero T
+ var zero string
correctLen := rv.Len()
correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
cpy := reflect.New(correctType)
_ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]T)
+ return cpy.Elem().Slice(0, correctLen).Interface().([]string)
}
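The conversion helpers above all rely on the same reflect trick: copy the slice into a fixed-size array so the result is comparable. A standalone sketch of that pattern (not the vendored code itself):

```go
package main

import (
	"fmt"
	"reflect"
)

// toArray mirrors Int64SliceValue above: it copies a slice into a [N]int64
// value so the result can be compared with == and stored in comparable structs.
func toArray(v []int64) interface{} {
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0))))
	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
	return cp.Elem().Interface()
}

func main() {
	a := toArray([]int64{1, 2, 3})
	b := toArray([]int64{1, 2, 3})
	fmt.Println(a == b) // true: array values compare element-wise
}
```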
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 0000000000..3dcd1caae6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+ "unsafe"
+)
+
+var (
+ // GlobalErrorHandler provides an ErrorHandler that can be used
+ // throughout an OpenTelemetry instrumented project. When a user
+ // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
+ // `Handle` will be delegated to the registered ErrorHandler.
+ GlobalErrorHandler = defaultErrorHandler()
+
+ // Compile-time check that delegator implements ErrorHandler.
+ _ ErrorHandler = (*ErrDelegator)(nil)
+ // Compile-time check that errLogger implements ErrorHandler.
+ _ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+}
+
+type ErrDelegator struct {
+ delegate unsafe.Pointer
+}
+
+func (d *ErrDelegator) Handle(err error) {
+ d.getDelegate().Handle(err)
+}
+
+func (d *ErrDelegator) getDelegate() ErrorHandler {
+ return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
+ atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
+}
+
+func defaultErrorHandler() *ErrDelegator {
+ d := &ErrDelegator{}
+ d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
+ return d
+}
+
+// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
+type ErrLogger struct {
+ l *log.Logger
+}
+
+// Handle logs err if no delegate is set, otherwise it is delegated.
+func (h *ErrLogger) Handle(err error) {
+ h.l.Print(err)
+}
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandler returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for prior returned instances.
+func GetErrorHandler() ErrorHandler {
+ return GlobalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandler previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+ GlobalErrorHandler.setDelegate(h)
+}
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) {
+ GetErrorHandler().Handle(err)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index 293c08961f..5951fd06d4 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -24,7 +24,7 @@ import (
"github.com/go-logr/stdr"
)
-// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals.
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
@@ -36,8 +36,9 @@ func init() {
// SetLogger overrides the globalLogger with l.
//
-// To see Info messages use a logger with `l.V(1).Enabled() == true`
-// To see Debug messages use a logger with `l.V(5).Enabled() == true`.
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`
+// To see Info messages use a logger with `l.V(4).Enabled() == true`
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
func SetLogger(l logr.Logger) {
atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
}
@@ -47,9 +48,9 @@ func getLogger() logr.Logger {
}
// Info prints messages about the general state of the API or SDK.
-// This should usually be less then 5 messages a minute.
+// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
- getLogger().V(1).Info(msg, keysAndValues...)
+ getLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
@@ -59,5 +60,11 @@ func Error(err error, msg string, keysAndValues ...interface{}) {
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
- getLogger().V(5).Info(msg, keysAndValues...)
+ getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+ getLogger().V(1).Info(msg, keysAndValues...)
}
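With the remapped verbosity levels above (1 = Warn, 4 = Info, 8 = Debug), a caller controls what it sees through the logr verbosity of the logger it installs. A sketch using stdr, the backend mentioned in the comments:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// Verbosity 1 surfaces Warn, 4 adds Info, 8 adds Debug; Error always shows.
	stdr.SetVerbosity(8)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel ", log.LstdFlags)))
}
```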
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index f058cc781e..cb3efbb9ad 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -25,6 +25,7 @@ type TracerConfig struct {
instrumentationVersion string
// Schema URL of the telemetry emitted by the Tracer.
schemaURL string
+ attrs attribute.Set
}
// InstrumentationVersion returns the version of the library providing instrumentation.
@@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string {
return t.instrumentationVersion
}
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+ return t.attrs
+}
+
// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
func (t *TracerConfig) SchemaURL() string {
return t.schemaURL
@@ -307,6 +314,16 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
// WithSchemaURL sets the schema URL for the Tracer.
func WithSchemaURL(schemaURL string) TracerOption {
return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
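A usage sketch of the new option alongside the existing ones; the tracer name, version, and attribute are placeholders:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// WithInstrumentationAttributes attaches attributes describing the
	// instrumentation scope itself; duplicates are collapsed into a Set.
	tracer := otel.Tracer(
		"example/instrumentation",
		trace.WithInstrumentationVersion("0.1.0"),
		trace.WithInstrumentationAttributes(attribute.String("tenant", "demo")),
	)
	_ = tracer
}
```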
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index 73950f2077..7cf6c7f3ef 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -37,7 +37,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
return noopTracer{}
}
-// noopTracer is an implementation of Tracer that preforms no operations.
+// noopTracer is an implementation of Tracer that performs no operations.
type noopTracer struct{}
var _ Tracer = noopTracer{}
@@ -53,7 +53,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
return ContextWithSpan(ctx, span), span
}
-// noopSpan is an implementation of Span that preforms no operations.
+// noopSpan is an implementation of Span that performs no operations.
type noopSpan struct{}
var _ Span = noopSpan{}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index d82fbaf550..c8748b5dea 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.13.0"
+ return "1.15.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 22df4553c2..66fc74dd93 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,10 +14,11 @@
module-sets:
stable-v1:
- version: v1.13.0
+ version: v1.15.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
+ - go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/jaeger
- go.opentelemetry.io/otel/example/namedtracer
@@ -25,16 +26,16 @@ module-sets:
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
- go.opentelemetry.io/otel/exporters/jaeger
- - go.opentelemetry.io/otel/exporters/zipkin
+ - go.opentelemetry.io/otel/exporters/otlp/internal/retry
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- - go.opentelemetry.io/otel/exporters/otlp/internal/retry
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- - go.opentelemetry.io/otel/trace
+ - go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/sdk
+ - go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.36.0
+ version: v0.38.0
modules:
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
@@ -49,7 +50,7 @@ module-sets:
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/view
experimental-schema:
- version: v0.0.3
+ version: v0.0.4
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000000..571116cc39
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+ - /internal/gen-atomicint/
+ - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000000..2e337a0ed5
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,15 @@
+/bin
+.DS_Store
+/vendor
+cover.html
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
+
+# Output of fossa analyzer
+/fossa
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
new file mode 100644
index 0000000000..5fe03f21bd
--- /dev/null
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -0,0 +1,117 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+ and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+ type. This is present only for Go 1.18 or higher, and is a drop-in for
+ replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+ `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
+## [1.9.0] - 2021-07-15
+### Added
+- Add `Float64.Swap` to match int atomic operations.
+- Add `atomic.Time` type for atomic operations on `time.Time` values.
+
+[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2021-06-09
+### Added
+- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
+
+[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
+ causing `CAS` to fail even though the old value matches.
+
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+ If you need to use the old import path, please add a `replace` directive to
+ your `go.mod`.
+
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000000..8765c9fbc6
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000000..46c945b32b
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,79 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+ go.uber.org/atomic/internal/gen-atomicint \
+ go.uber.org/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+ go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+ go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+ $(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+ $(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+ go list -find ./... | \
+ grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+ paste -sd, -)
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+ go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+ @[ -z "$$(git status --porcelain)" ] || ( \
+ echo "Working tree is dirty. Commit your changes first."; \
+ git status; \
+ exit 1 )
+ @make generate
+ @status=$$(git status --porcelain); \
+ [ -z "$$status" ] || ( \
+ echo "Working tree is dirty after `make generate`:"; \
+ echo "$$status"; \
+ echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000000..96b47a1f12
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
new file mode 100644
index 0000000000..dfa2085f49
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -0,0 +1,88 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+)
+
+// Bool is an atomic type-safe wrapper for bool values.
+type Bool struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroBool bool
+
+// NewBool creates a new Bool.
+func NewBool(val bool) *Bool {
+ x := &Bool{}
+ if val != _zeroBool {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool {
+ return truthy(x.v.Load())
+}
+
+// Store atomically stores the passed bool.
+func (x *Bool) Store(val bool) {
+ x.v.Store(boolToInt(val))
+}
+
+// CAS is an atomic compare-and-swap for bool values.
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Bool) CAS(old, new bool) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
+}
+
+// Swap atomically stores the given bool and returns the old
+// value.
+func (x *Bool) Swap(val bool) (old bool) {
+ return truthy(x.v.Swap(boolToInt(val)))
+}
+
+// MarshalJSON encodes the wrapped bool into JSON.
+func (x *Bool) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a bool from JSON.
+func (x *Bool) UnmarshalJSON(b []byte) error {
+ var v bool
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
new file mode 100644
index 0000000000..a2e60e9873
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
+
+func truthy(n uint32) bool {
+ return n == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() (old bool) {
+ for {
+ old := b.Load()
+ if b.CAS(old, !old) {
+ return old
+ }
+ }
+}
+
+// String encodes the wrapped value as a string.
+func (b *Bool) String() string {
+ return strconv.FormatBool(b.Load())
+}
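A short usage sketch of the wrapper defined above; the zero value is ready to use and `Toggle` builds on the generated compare-and-swap:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	var ready atomic.Bool // zero value holds false and is safe to use
	ready.Store(true)
	prev := ready.Toggle() // CAS loop: flips the value, returns the old one
	fmt.Println(prev, ready.Load(), ready.String()) // true false false
}
```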
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
new file mode 100644
index 0000000000..ae7390ee68
--- /dev/null
+++ b/vendor/go.uber.org/atomic/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
new file mode 100644
index 0000000000..6f4157445c
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -0,0 +1,89 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is an atomic type-safe wrapper for time.Duration values.
+type Duration struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Int64
+}
+
+var _zeroDuration time.Duration
+
+// NewDuration creates a new Duration.
+func NewDuration(val time.Duration) *Duration {
+ x := &Duration{}
+ if val != _zeroDuration {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Duration.
+func (x *Duration) Load() time.Duration {
+ return time.Duration(x.v.Load())
+}
+
+// Store atomically stores the passed time.Duration.
+func (x *Duration) Store(val time.Duration) {
+ x.v.Store(int64(val))
+}
+
+// CAS is an atomic compare-and-swap for time.Duration values.
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
+ return x.v.CompareAndSwap(int64(old), int64(new))
+}
+
+// Swap atomically stores the given time.Duration and returns the old
+// value.
+func (x *Duration) Swap(val time.Duration) (old time.Duration) {
+ return time.Duration(x.v.Swap(int64(val)))
+}
+
+// MarshalJSON encodes the wrapped time.Duration into JSON.
+func (x *Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a time.Duration from JSON.
+func (x *Duration) UnmarshalJSON(b []byte) error {
+ var v time.Duration
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
new file mode 100644
index 0000000000..4c18b0a9ed
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(delta)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(delta)))
+}
+
+// String encodes the wrapped value as a string.
+func (d *Duration) String() string {
+ return d.Load().String()
+}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 0000000000..27b23ea162
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,62 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper for error values.
+type Error struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroError error
+
+// NewError creates a new Error.
+func NewError(val error) *Error {
+ x := &Error{}
+ if val != _zeroError {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped error.
+func (x *Error) Load() error {
+ return unpackError(x.v.Load())
+}
+
+// Store atomically stores the passed error.
+func (x *Error) Store(val error) {
+ x.v.Store(packError(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for error values.
+func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
+ return x.v.CompareAndSwap(packError(old), packError(new))
+}
+
+// Swap atomically stores the given error and returns the old
+// value.
+func (x *Error) Swap(val error) (old error) {
+ return unpackError(x.v.Swap(packError(val)))
+}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
new file mode 100644
index 0000000000..d31fb633bb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// atomic.Value panics on nil inputs, or if the underlying type changes.
+// Stabilize by always storing a custom struct that we control.
+
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
+
+type packedError struct{ Value error }
+
+func packError(v error) interface{} {
+ return packedError{v}
+}
+
+func unpackError(v interface{}) error {
+ if err, ok := v.(packedError); ok {
+ return err.Value
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
new file mode 100644
index 0000000000..5d535a6d2a
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float32 is an atomic type-safe wrapper for float32 values.
+type Float32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroFloat32 float32
+
+// NewFloat32 creates a new Float32.
+func NewFloat32(val float32) *Float32 {
+ x := &Float32{}
+ if val != _zeroFloat32 {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float32.
+func (x *Float32) Load() float32 {
+ return math.Float32frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float32.
+func (x *Float32) Store(val float32) {
+ x.v.Store(math.Float32bits(val))
+}
+
+// Swap atomically stores the given float32 and returns the old
+// value.
+func (x *Float32) Swap(val float32) (old float32) {
+ return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float32 into JSON.
+func (x *Float32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float32 from JSON.
+func (x *Float32) UnmarshalJSON(b []byte) error {
+ var v float32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go
new file mode 100644
index 0000000000..b0cd8d9c82
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
new file mode 100644
index 0000000000..11d5189a5f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float64 is an atomic type-safe wrapper for float64 values.
+type Float64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint64
+}
+
+var _zeroFloat64 float64
+
+// NewFloat64 creates a new Float64.
+func NewFloat64(val float64) *Float64 {
+ x := &Float64{}
+ if val != _zeroFloat64 {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float64.
+func (x *Float64) Load() float64 {
+ return math.Float64frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float64.
+func (x *Float64) Store(val float64) {
+ x.v.Store(math.Float64bits(val))
+}
+
+// Swap atomically stores the given float64 and returns the old
+// value.
+func (x *Float64) Swap(val float64) (old float64) {
+ return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float64 into JSON.
+func (x *Float64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float64 from JSON.
+func (x *Float64) UnmarshalJSON(b []byte) error {
+ var v float64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
new file mode 100644
index 0000000000..48c52b0abf
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(delta float64) float64 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(delta float64) float64 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float64) CAS(old, new float64) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float64 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
+func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float64) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64)
+}
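A sketch of the Add path above under concurrency; each Add is a CompareAndSwap loop over the float's bit pattern, so concurrent increments never lose updates (0.5 is chosen because it is exactly representable):

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var total atomic.Float64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			total.Add(0.5) // retried CompareAndSwap on the Uint64 bit pattern
		}()
	}
	wg.Wait()
	fmt.Println(total.Load()) // 50
}
```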
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
new file mode 100644
index 0000000000..1e9ef4f879
--- /dev/null
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
+//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
+//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
+//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
+//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
new file mode 100644
index 0000000000..b9a68f42ca
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int32 is an atomic wrapper around int32.
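+//
+// A brief, illustrative sketch of typical use:
+//
+//	n := atomic.NewInt32(5)
+//	n.Inc()                 // value is now 6
+//	n.CompareAndSwap(6, 10) // returns true; value is now 10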
+type Int32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int32
+}
+
+// NewInt32 creates a new Int32.
+func NewInt32(val int32) *Int32 {
+ return &Int32{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(delta int32) int32 {
+ return atomic.AddInt32(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(delta int32) int32 {
+ return atomic.AddInt32(&i.v, -delta)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int32) CAS(old, new int32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(val int32) {
+ atomic.StoreInt32(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(val int32) (old int32) {
+ return atomic.SwapInt32(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped int32 into JSON.
+func (i *Int32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int32.
+func (i *Int32) UnmarshalJSON(b []byte) error {
+ var v int32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int32) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
new file mode 100644
index 0000000000..78d260976f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int64 is an atomic wrapper around int64.
+type Int64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int64
+}
+
+// NewInt64 creates a new Int64.
+func NewInt64(val int64) *Int64 {
+ return &Int64{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(delta int64) int64 {
+ return atomic.AddInt64(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(delta int64) int64 {
+ return atomic.AddInt64(&i.v, -delta)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int64) CAS(old, new int64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(val int64) {
+ atomic.StoreInt64(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(val int64) (old int64) {
+ return atomic.SwapInt64(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped int64 into JSON.
+func (i *Int64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int64.
+func (i *Int64) UnmarshalJSON(b []byte) error {
+ var v int64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int64) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
new file mode 100644
index 0000000000..54b74174ab
--- /dev/null
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// nocmp is an uncomparable struct. Embed this inside another struct to make
+// it uncomparable.
+//
+// type Foo struct {
+// nocmp
+// // ...
+// }
+//
+// This DOES NOT:
+//
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
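+//
+// It works because nocmp is a zero-length array of func values: function
+// types are not comparable in Go, so any struct embedding nocmp cannot be
+// compared with ==, while the zero-length field adds no memory overhead.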
+type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
new file mode 100644
index 0000000000..e0f47dba46
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go118.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18 && !go1.19
+// +build go1.18,!go1.19
+
+package atomic
+
+import "unsafe"
+
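+// Pointer is an atomic pointer of type *T.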
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p UnsafePointer
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(unsafe.Pointer(v))
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return (*T)(p.p.Load())
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(unsafe.Pointer(val))
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return (*T)(p.p.Swap(unsafe.Pointer(val)))
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+}
diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go
new file mode 100644
index 0000000000..6726f17ad6
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go119.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.19
+// +build go1.19
+
+package atomic
+
+import "sync/atomic"
+
+// Pointer is an atomic pointer of type *T.
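+//
+// A brief, illustrative sketch of typical use:
+//
+//	v := 42
+//	p := atomic.NewPointer(&v)
+//	_ = p.Load() // *int pointing at 42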
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p atomic.Pointer[T]
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(v)
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return p.p.Load()
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(val)
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return p.p.Swap(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(old, new)
+}
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000000..c4bea70f4d
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,65 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper for string values.
+type String struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroString string
+
+// NewString creates a new String.
+func NewString(val string) *String {
+ x := &String{}
+ if val != _zeroString {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped string.
+func (x *String) Load() string {
+ if v := x.v.Load(); v != nil {
+ return v.(string)
+ }
+ return _zeroString
+}
+
+// Store atomically stores the passed string.
+func (x *String) Store(val string) {
+ x.v.Store(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for string values.
+func (x *String) CompareAndSwap(old, new string) (swapped bool) {
+ return x.v.CompareAndSwap(old, new)
+}
+
+// Swap atomically stores the given string and returns the old
+// value.
+func (x *String) Swap(val string) (old string) {
+ return x.v.Swap(val).(string)
+}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
new file mode 100644
index 0000000000..1f63dfd5b9
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
+
+// String returns the wrapped value.
+func (s *String) String() string {
+ return s.Load()
+}
+
+// MarshalText encodes the wrapped string into a textual form.
+//
+// This makes it encodable as JSON, YAML, XML, and more.
+func (s *String) MarshalText() ([]byte, error) {
+ return []byte(s.Load()), nil
+}
+
+// UnmarshalText decodes text and replaces the wrapped string with it.
+//
+// This makes it decodable from JSON, YAML, XML, and more.
+func (s *String) UnmarshalText(b []byte) error {
+ s.Store(string(b))
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
new file mode 100644
index 0000000000..1660feb142
--- /dev/null
+++ b/vendor/go.uber.org/atomic/time.go
@@ -0,0 +1,55 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "time"
+)
+
+// Time is an atomic type-safe wrapper for time.Time values.
+type Time struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroTime time.Time
+
+// NewTime creates a new Time.
+func NewTime(val time.Time) *Time {
+ x := &Time{}
+ if val != _zeroTime {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Time.
+func (x *Time) Load() time.Time {
+ return unpackTime(x.v.Load())
+}
+
+// Store atomically stores the passed time.Time.
+func (x *Time) Store(val time.Time) {
+ x.v.Store(packTime(val))
+}
diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go
new file mode 100644
index 0000000000..1e3dc978aa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/time_ext.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
+
+func packTime(t time.Time) interface{} {
+ return t
+}
+
+func unpackTime(v interface{}) time.Time {
+ if t, ok := v.(time.Time); ok {
+ return t
+ }
+ return time.Time{}
+}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
new file mode 100644
index 0000000000..d6f04a96dc
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint32 is an atomic wrapper around uint32.
+type Uint32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint32
+}
+
+// NewUint32 creates a new Uint32.
+func NewUint32(val uint32) *Uint32 {
+ return &Uint32{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(delta uint32) uint32 {
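+	// ^(delta - 1) equals -delta in two's-complement arithmetic, so this
+	// subtracts delta using the unsigned Add primitive.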
+ return atomic.AddUint32(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint32) CAS(old, new uint32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(val uint32) {
+ atomic.StoreUint32(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(val uint32) (old uint32) {
+ return atomic.SwapUint32(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uint32 into JSON.
+func (i *Uint32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint32.
+func (i *Uint32) UnmarshalJSON(b []byte) error {
+ var v uint32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint32) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
new file mode 100644
index 0000000000..2574bdd5ec
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint64 is an atomic wrapper around uint64.
+type Uint64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint64
+}
+
+// NewUint64 creates a new Uint64.
+func NewUint64(val uint64) *Uint64 {
+ return &Uint64{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint64) CAS(old, new uint64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(val uint64) {
+ atomic.StoreUint64(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(val uint64) (old uint64) {
+ return atomic.SwapUint64(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uint64 into JSON.
+func (i *Uint64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint64.
+func (i *Uint64) UnmarshalJSON(b []byte) error {
+ var v uint64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint64) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
new file mode 100644
index 0000000000..81b275a7ad
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uintptr.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uintptr is an atomic wrapper around uintptr.
+type Uintptr struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uintptr
+}
+
+// NewUintptr creates a new Uintptr.
+func NewUintptr(val uintptr) *Uintptr {
+ return &Uintptr{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uintptr) Load() uintptr {
+ return atomic.LoadUintptr(&i.v)
+}
+
+// Add atomically adds to the wrapped uintptr and returns the new value.
+func (i *Uintptr) Add(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uintptr and returns the new value.
+func (i *Uintptr) Sub(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uintptr and returns the new value.
+func (i *Uintptr) Inc() uintptr {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uintptr and returns the new value.
+func (i *Uintptr) Dec() uintptr {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return atomic.CompareAndSwapUintptr(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uintptr) Store(val uintptr) {
+ atomic.StoreUintptr(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uintptr and returns the old value.
+func (i *Uintptr) Swap(val uintptr) (old uintptr) {
+ return atomic.SwapUintptr(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uintptr into JSON.
+func (i *Uintptr) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uintptr.
+func (i *Uintptr) UnmarshalJSON(b []byte) error {
+ var v uintptr
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uintptr) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
new file mode 100644
index 0000000000..34868baf6a
--- /dev/null
+++ b/vendor/go.uber.org/atomic/unsafe_pointer.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2021-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// UnsafePointer is an atomic wrapper around unsafe.Pointer.
+type UnsafePointer struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v unsafe.Pointer
+}
+
+// NewUnsafePointer creates a new UnsafePointer.
+func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
+ return &UnsafePointer{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (p *UnsafePointer) Load() unsafe.Pointer {
+ return atomic.LoadPointer(&p.v)
+}
+
+// Store atomically stores the passed value.
+func (p *UnsafePointer) Store(val unsafe.Pointer) {
+ atomic.StorePointer(&p.v, val)
+}
+
+// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
+func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
+ return atomic.SwapPointer(&p.v, val)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+ return p.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
+ return atomic.CompareAndSwapPointer(&p.v, old, new)
+}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
new file mode 100644
index 0000000000..52caedb9a5
--- /dev/null
+++ b/vendor/go.uber.org/atomic/value.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+ _ nocmp // disallow non-atomic comparison
+
+ atomic.Value
+}
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000000..6d4d1be7b5
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000000..b9a05e3da0
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+cover.html
+cover.out
+/bin
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000000..f8177b978c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,95 @@
+Releases
+========
+
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements the multiple-error
+  interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc`, which allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+ loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+ and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+ errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release is committing to making no breaking
+changes to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+ allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000000..413e30f7ce
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017-2021 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000000..dcb6fe723c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,38 @@
+# Directory to put `go install`ed binaries in.
+export GOBIN ?= $(shell pwd)/bin
+
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: golint
+golint:
+ @cd tools && go install golang.org/x/lint/golint
+ @$(GOBIN)/golint ./...
+
+.PHONY: staticcheck
+staticcheck:
+ @cd tools && go install honnef.co/go/tools/cmd/staticcheck
+ @$(GOBIN)/staticcheck ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
+ go tool cover -html=cover.out -o cover.html
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000000..5ab6ac40f4
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,43 @@
+# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+`multierr` allows combining one or more Go `error`s together.
+
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
+## Installation
+
+```bash
+go get -u go.uber.org/multierr@latest
+```
+
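+## Example
+
+A minimal, illustrative sketch (the `step1` and `step2` calls below are
+placeholders for any fallible operations):
+
+```go
+// Combine skips nil errors, so each step may independently succeed or fail.
+err := multierr.Combine(step1(), step2())
+
+// Errors returns the underlying list of errors (or nil if err is nil).
+for _, e := range multierr.Errors(err) {
+	fmt.Println(e)
+}
+```
+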
+## Status
+
+Stable: No breaking changes will be made before 2.0.
+
+-------------------------------------------------------------------------------
+
+Released under the [MIT License].
+
+[MIT License]: LICENSE.txt
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
+[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
+[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000000..3a828b2dff
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,646 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// # Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires introduction of a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
+//
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to the list of errors behind a multierr error, you
+// should prefer using the Errors function. That said, if you need cheap
+// read-only access to the underlying errors slice, you can attempt to cast
+// the error to this interface. You MUST handle the failure case gracefully
+// because errors returned by Combine and Append are not guaranteed to
+// implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ // Prefix for multi-line messages
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, a nil slice is returned.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ return extractErrors(err)
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+ copyNeeded atomic.Bool
+ errors []error
+}
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+ if merr == nil {
+ return nil
+ }
+ return merr.errors
+}
+
+func (merr *multiError) Error() string {
+ if merr == nil {
+ return ""
+ }
+
+ buff := _bufferPool.Get().(*bytes.Buffer)
+ buff.Reset()
+
+ merr.writeSingleline(buff)
+
+ result := buff.String()
+ _bufferPool.Put(buff)
+ return result
+}
+
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+ if c == 'v' && f.Flag('+') {
+ merr.writeMultiline(f)
+ } else {
+ merr.writeSingleline(f)
+ }
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+ first := true
+ for _, item := range merr.errors {
+ if first {
+ first = false
+ } else {
+ w.Write(_singlelineSeparator)
+ }
+ io.WriteString(w, item.Error())
+ }
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+ w.Write(_multilinePrefix)
+ for _, item := range merr.errors {
+ w.Write(_multilineSeparator)
+ writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+ }
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+ first := true
+ for len(s) > 0 {
+ if first {
+ first = false
+ } else {
+ w.Write(prefix)
+ }
+
+ idx := strings.IndexByte(s, '\n')
+ if idx < 0 {
+ idx = len(s) - 1
+ }
+
+ io.WriteString(w, s[:idx+1])
+ s = s[idx+1:]
+ }
+}
+
+type inspectResult struct {
+ // Number of top-level non-nil errors
+ Count int
+
+ // Total number of errors including multiErrors
+ Capacity int
+
+ // Index of the first non-nil error in the list. Value is meaningless if
+ // Count is zero.
+ FirstErrorIdx int
+
+ // Whether the list contains at least one multiError
+ ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+ first := true
+ for i, err := range errors {
+ if err == nil {
+ continue
+ }
+
+ res.Count++
+ if first {
+ first = false
+ res.FirstErrorIdx = i
+ }
+
+ if merr, ok := err.(*multiError); ok {
+ res.Capacity += len(merr.errors)
+ res.ContainsMultiError = true
+ } else {
+ res.Capacity++
+ }
+ }
+ return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
+ res := inspect(errors)
+ switch res.Count {
+ case 0:
+ return nil
+ case 1:
+ // only one non-nil entry
+ return errors[res.FirstErrorIdx]
+ case len(errors):
+ if !res.ContainsMultiError {
+ // Error list is flat. Make a copy of it
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
+ }
+ }
+
+ nonNilErrs := make([]error, 0, res.Capacity)
+ for _, err := range errors[res.FirstErrorIdx:] {
+ if err == nil {
+ continue
+ }
+
+ if nested, ok := err.(*multiError); ok {
+ nonNilErrs = append(nonNilErrs, nested.errors...)
+ } else {
+ nonNilErrs = append(nonNilErrs, err)
+ }
+ }
+
+ return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// Combine(nil, nil) // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// Combine(err) // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+ return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
+func Append(left error, right error) error {
+ switch {
+ case left == nil:
+ return right
+ case right == nil:
+ return left
+ }
+
+ if _, ok := right.(*multiError); !ok {
+ if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+ // Common case where the error on the left is constantly being
+ // appended to.
+ errs := append(l.errors, right)
+ return &multiError{errors: errs}
+ } else if !ok {
+ // Both errors are single errors.
+ return &multiError{errors: []error{left, right}}
+ }
+ }
+
+	// Either right, or both left and right, are multiErrors. Fall back to
+	// the usual, more expensive logic.
+ errors := [2]error{left, right}
+ return fromSlice(errors[0:])
+}
+
+// AppendInto appends an error into the destination of an error pointer and
+// returns whether the error being appended was non-nil.
+//
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
+//
+// The above is equivalent to,
+//
+// err := multierr.Append(r.Close(), w.Close())
+//
+// As AppendInto reports whether the provided error was non-nil, it may be
+// used to build a multierr error in a loop more ergonomically. For example:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
+func AppendInto(into *error, err error) (errored bool) {
+ if into == nil {
+ // We panic if 'into' is nil. This is not documented above
+ // because suggesting that the pointer must be non-nil may
+ // confuse users into thinking that the error that it points
+ // to must be non-nil.
+ panic("misuse of multierr.AppendInto: into pointer must not be nil")
+ }
+
+ if err == nil {
+ return false
+ }
+ *into = Append(*into, err)
+ return true
+}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and
+//		// if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+//		// Similarly, this schedules scanner.Err to be called and
+//		// inspected when the function returns, appending its error
+//		// into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using a function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 0000000000..a173f9c251
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+	// Check if the given err is an unwrappable error that
+	// implements the multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 0000000000..93872a3fcd
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000000..8e5ca7d3e2
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 95% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+ignore:
+ - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000000..da9d9d00b4
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
+
+/bin
+cover.out
+cover.html
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000000..92aa65d660
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,109 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000000..0db1f9f15f
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,617 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+ current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+ messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+ new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+ methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+ `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+ `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+ vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+ size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+ with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+ without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+ for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+ allows disabling annotation of log entries with caller information if
+ previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+ `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+ adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+ development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+ be generated for arrays of `time.Time` objects when using string-based time
+ formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger.
+* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
+
+Thanks to @caibirdme for their contributions to this release.
+
+## 1.13.0 (13 Nov 2019)
+
+Enhancements:
+* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors
+ to log pointers to primitives with support for `nil` values.
+
+Thanks to @jbizzle for their contributions to this release.
+
+## 1.12.0 (29 Oct 2019)
+
+Enhancements:
+* [#751][]: Migrate to Go modules.
+
+## 1.11.0 (21 Oct 2019)
+
+Enhancements:
+* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`.
+* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders.
+
+Thanks to @juicemia, @uhthomas for their contributions to this release.
+
+## 1.10.0 (29 Apr 2019)
+
+Bugfixes:
+* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a
+ string.
+* [#706][]: Fix incorrect call depth to determine caller in Go 1.12.
+
+Enhancements:
+* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test
+ loggers.
+* [#675][]: Don't panic when encoding a String field.
+* [#704][]: Disable HTML escaping for JSON objects encoded using the
+ reflect-based encoder.
+
+Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
+to this release.
+
+## v1.9.1 (06 Aug 2018)
+
+Bugfixes:
+
+* [#614][]: MapObjectEncoder should not ignore empty slices.
+
+## v1.9.0 (19 Jul 2018)
+
+Enhancements:
+* [#602][]: Reduce number of allocations when logging with reflection.
+* [#572][], [#606][]: Expose a registry for third-party logging sinks.
+
+Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
+@dimroc for their contributions to this release.
+
+## v1.8.0 (13 Apr 2018)
+
+Enhancements:
+* [#508][]: Make log level configurable when redirecting the standard
+ library's logger.
+* [#518][]: Add a logger that writes to a `*testing.TB`.
+* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
+
+Bugfixes:
+* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
+
+Thanks to @DiSiqueira and @djui for their contributions to this release.
+
+## v1.7.1 (25 Sep 2017)
+
+Bugfixes:
+* [#504][]: Store strings when using AddByteString with the map encoder.
+
+## v1.7.0 (21 Sep 2017)
+
+Enhancements:
+
+* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
+ to specify the level of the logged messages.
+
+## v1.6.0 (30 Aug 2017)
+
+Enhancements:
+
+* [#491][]: Omit zap stack frames from stacktraces.
+* [#490][]: Add a `ContextMap` method to observer logs for simpler
+ field validation in tests.
+
+## v1.5.0 (22 Jul 2017)
+
+Enhancements:
+
+* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
+* [#465][]: Support user-supplied encoders for logger names.
+
+Bugfixes:
+
+* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
+
+Thanks to @richard-tunein and @pavius for their contributions to this release.
+
+## v1.4.1 (08 Jun 2017)
+
+This release fixes two bugs.
+
+Bugfixes:
+
+* [#435][]: Support a variety of case conventions when unmarshaling levels.
+* [#444][]: Fix a panic in the observer.
+
+## v1.4.0 (12 May 2017)
+
+This release adds a few small features and is fully backward-compatible.
+
+Enhancements:
+
+* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
+ override the Unix-style default.
+* [#425][]: Preserve time zones when logging times.
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
+ variety of operations a bit simpler.
+
+## v1.3.0 (25 Apr 2017)
+
+This release adds an enhancement to zap's testing helpers as well as the
+ability to marshal an AtomicLevel. It is fully backward-compatible.
+
+Enhancements:
+
+* [#415][]: Add a substring-filtering helper to zap's observer. This is
+ particularly useful when testing the `SugaredLogger`.
+* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
+
+## v1.2.0 (13 Apr 2017)
+
+This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
+
+Enhancements:
+
+* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
+ `grpclog.Logger`.
+
+## v1.1.0 (31 Mar 2017)
+
+This release fixes two bugs and adds some enhancements to zap's testing helpers.
+It is fully backward-compatible.
+
+Bugfixes:
+
+* [#385][]: Fix caller path trimming on Windows.
+* [#396][]: Fix a panic when attempting to use non-existent directories with
+ zap's configuration struct.
+
+Enhancements:
+
+* [#386][]: Add filtering helpers to zaptest's observing logger.
+
+Thanks to @moitias for contributing to this release.
+
+## v1.0.0 (14 Mar 2017)
+
+This is zap's first stable release. All exported APIs are now final, and no
+further breaking changes will be made in the 1.x release series. Anyone using a
+semver-aware dependency manager should now pin to `^1`.
+
+Breaking changes:
+
+* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
+ casting from `[]byte` to `string`.
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
+ `zap.Logger`, and `zap.SugaredLogger`.
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
+ clash with other testing helpers.
+
+Bugfixes:
+
+* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
+ for tab-separated console output.
+* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
+ work with concurrency-safe `WriteSyncer` implementations.
+* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
+ systems.
+* [#373][]: Report the correct caller from zap's standard library
+ interoperability wrappers.
+
+Enhancements:
+
+* [#348][]: Add a registry allowing third-party encodings to work with zap's
+ built-in `Config`.
+* [#327][]: Make the representation of logger callers configurable (like times,
+ levels, and durations).
+* [#376][]: Allow third-party encoders to use their own buffer pools, which
+ removes the last performance advantage that zap's encoders have over plugins.
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
+ `WriteSyncer`s and lock the result.
+* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
+ Go 1.9).
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
+ easier for particularly punctilious users to unit test their application's
+ logging.
+
+Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
+contributions to this release.
+
+## v1.0.0-rc.3 (7 Mar 2017)
+
+This is the third release candidate for zap's stable release. There are no
+breaking changes.
+
+Bugfixes:
+
+* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
+ rather than `[]uint8`.
+
+Enhancements:
+
+* [#307][]: Users can opt into colored output for log levels.
+* [#353][]: In addition to hijacking the output of the standard library's
+ package-global logging functions, users can now construct a zap-backed
+ `log.Logger` instance.
+* [#311][]: Frames from common runtime functions and some of zap's internal
+ machinery are now omitted from stacktraces.
+
+Thanks to @ansel1 and @suyash for their contributions to this release.
+
+## v1.0.0-rc.2 (21 Feb 2017)
+
+This is the second release candidate for zap's stable release. It includes two
+breaking changes.
+
+Breaking changes:
+
+* [#316][]: Zap's global loggers are now fully concurrency-safe
+ (previously, users had to ensure that `ReplaceGlobals` was called before the
+ loggers were in use). However, they must now be accessed via the `L()` and
+ `S()` functions. Users can update their projects with
+
+ ```
+ gofmt -r "zap.L -> zap.L()" -w .
+ gofmt -r "zap.S -> zap.S()" -w .
+ ```
+* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
+ JSON and YAML struct tags on all config structs. This release fixes the tags
+ and adds static analysis to prevent similar bugs in the future.
+
+Bugfixes:
+
+* [#321][]: Redirecting the standard library's `log` output now
+ correctly reports the logger's caller.
+
+Enhancements:
+
+* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
+ errors like those produced by `github.com/pkg/errors`.
+* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
+ now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
+ zap.NewNop()' -w .`.
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
+ more informative error.
+
+Thanks to @skipor and @chapsuk for their contributions to this release.
+
+## v1.0.0-rc.1 (14 Feb 2017)
+
+This is the first release candidate for zap's stable release. There are multiple
+breaking changes and improvements from the pre-release version. Most notably:
+
+* **Zap's import path is now "go.uber.org/zap"** — all users will
+ need to update their code.
+* User-facing types and functions remain in the `zap` package. Code relevant
+ largely to extension authors is now in the `zapcore` package.
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's
+ internals but provide a different user-facing API.
+* `Logger` is now a concrete type instead of an interface.
+* A less verbose (though slower) logging API is included by default.
+* Package-global loggers `L` and `S` are included.
+* A human-friendly console encoder is included.
+* A declarative config struct allows common logger configurations to be managed
+ as configuration instead of code.
+* Sampling is more accurate, and doesn't depend on the standard library's shared
+ timer heap.
+
+## v0.1.0-beta.1 (6 Feb 2017)
+
+This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
+upgrade at their leisure. Since this is the first tagged release, there are no
+backward compatibility concerns and all functionality is new.
+
+Early zap adopters should pin to the 0.1.x minor version until they're ready to
+upgrade to the upcoming stable release.
+
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+[#402]: https://github.com/uber-go/zap/pull/402
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+[#487]: https://github.com/uber-go/zap/pull/487
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+[#504]: https://github.com/uber-go/zap/pull/504
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+[#614]: https://github.com/uber-go/zap/pull/614
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+[#751]: https://github.com/uber-go/zap/pull/751
+[#758]: https://github.com/uber-go/zap/pull/758
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..e327d9aa5c
--- /dev/null
+++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-conduct@uber.com. The project
+team will review and investigate all complaints, and will respond in a way
+that it deems appropriate to the circumstances. The project team is obligated
+to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
new file mode 100644
index 0000000000..ea02f3cae2
--- /dev/null
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -0,0 +1,70 @@
+# Contributing
+
+We'd love your help making zap the very best structured logging library in Go!
+
+If you'd like to add new exported APIs, please [open an issue][open-issue]
+describing your proposal — discussing API changes ahead of time makes
+pull request review much smoother. In your issue, pull request, and any other
+communications, please remember to treat your fellow contributors with
+respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
+
+Note that you'll need to sign [Uber's Contributor License Agreement][cla]
+before we can accept any of your contributions. If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+
+## Setup
+
+[Fork][fork], then clone the repository:
+
+```bash
+mkdir -p $GOPATH/src/go.uber.org
+cd $GOPATH/src/go.uber.org
+git clone git@github.com:your_github_username/zap.git
+cd zap
+git remote add upstream https://github.com/uber-go/zap.git
+git fetch upstream
+```
+
+Make sure that the tests and the linters pass:
+
+```bash
+make test
+make lint
+```
+
+## Making Changes
+
+Start by creating a new branch for your changes:
+
+```bash
+cd $GOPATH/src/go.uber.org/zap
+git checkout master
+git fetch upstream
+git rebase upstream/master
+git checkout -b cool_new_feature
+```
+
+Make your changes, then ensure that `make lint` and `make test` still pass. If
+you're satisfied with your changes, push them to your fork.
+
+```bash
+git push origin cool_new_feature
+```
+
+Then use the GitHub UI to open a pull request.
+
+At this point, you're waiting on us to review your changes. We _try_ to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000000..b183b20bc1
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
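+
+For example, a minimal sketch of that approach (the interface and function
+names below are illustrative, not part of zap's API):
+
+```go
+// Declare only the logging methods this package actually uses.
+type errorLogger interface {
+	Error(msg string, fields ...zap.Field)
+}
+
+// Any *zap.Logger satisfies errorLogger.
+func process(log errorLogger) {
+	log.Error("processing failed", zap.String("reason", "example"))
+}
+```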
+
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which will
+cause repeated logs within a second to be sampled. See more details on why sampling
+is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
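+
+If you need every entry, a minimal sketch (assuming the production defaults
+are otherwise what you want) is to clear the sampling configuration before
+building the logger:
+
+```go
+cfg := zap.NewProductionConfig()
+cfg.Sampling = nil // disable sampling so no entries are dropped
+logger, err := cfg.Build()
+if err != nil {
+	panic(err)
+}
+defer logger.Sync()
+```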
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
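+
+A short sketch of the pattern (the `run` function here is hypothetical):
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+
+if err := run(); err != nil {
+	// Fatal flushes any buffered entries, then calls os.Exit(1).
+	logger.Fatal("unrecoverable error", zap.Error(err))
+}
+```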
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+
+If you've ever written code like this, you need `DPanic`:
+
+```go
+if err != nil {
+ panic(fmt.Sprintf("shouldn't ever get here: %v", err))
+}
+```
+
+## Installation
+
+### What does the error `expects import "go.uber.org/zap"` mean?
+
+Either zap was installed incorrectly or you're referencing the wrong package
+name in your code.
+
+Zap's source code happens to be hosted on GitHub, but the [import
+path][import-path] is `go.uber.org/zap`. This gives us, the project
+maintainers, the freedom to move the source code if necessary. However, it
+means that you need to take a little care when installing and using the
+package.
+
+If you follow two simple rules, everything should work: install zap with `go
+get -u go.uber.org/zap`, and always import it in your code with `import
+"go.uber.org/zap"`. Your code shouldn't contain *any* references to
+`github.com/uber-go/zap`.
+
+## Usage
+
+### Does zap support log rotation?
+
+Zap doesn't natively support rotating log files, since we prefer to leave this
+to an external program like `logrotate`.
+
+However, it's easy to integrate a log rotation package like
+[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
+
+```go
+// lumberjack.Logger is already safe for concurrent use, so we don't need to
+// lock it.
+w := zapcore.AddSync(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+})
+core := zapcore.NewCore(
+ zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ w,
+ zap.InfoLevel,
+)
+logger := zap.New(core)
+```
+
+## Extensions
+
+We'd love to support every logging need within zap itself, but we're only
+familiar with a handful of log ingestion systems, flag-parsing packages, and
+the like. Rather than merging code that we can't effectively debug and
+support, we'd rather grow an ecosystem of zap extensions.
+
+We're aware of the following extensions, but haven't used them ourselves:
+
+| Package | Integration |
+| --- | --- |
+| `github.com/tchap/zapext` | Sentry, syslog |
+| `github.com/fgrosse/zaptest` | Ginkgo |
+| `github.com/blendle/zapdriver` | Stackdriver |
+| `github.com/moul/zapgorm` | Gorm |
+| `github.com/moul/zapfilter` | Advanced filtering rules |
+
+[go-proverbs]: https://go-proverbs.github.io/
+[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
+[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt
new file mode 100644
index 0000000000..6652bed45f
--- /dev/null
+++ b/vendor/go.uber.org/zap/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
new file mode 100644
index 0000000000..9b1bc3b0e1
--- /dev/null
+++ b/vendor/go.uber.org/zap/Makefile
@@ -0,0 +1,73 @@
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+STATICCHECK = $(GOBIN)/staticcheck
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+# Directories containing independent Go modules.
+#
+# We track coverage only for the main module.
+MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+
+# Many Go tools take file globs or directories as arguments instead of packages.
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: all
+all: lint test
+
+.PHONY: lint
+lint: $(GOLINT) $(STATICCHECK)
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking lint..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking staticcheck..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./checklicense.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+ @echo "Checking 'go mod tidy'..."
+ @make tidy
+ @if ! git diff --quiet; then \
+ echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
+ git --no-pager diff; \
+ fi
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach dir,$(MODULE_DIRS), ( \
+ cd $(dir) && \
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
+ ) &&) true
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
new file mode 100644
index 0000000000..a553a428c8
--- /dev/null
+++ b/vendor/go.uber.org/zap/README.md
@@ -0,0 +1,133 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users _choose_ when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :---------: | :-----------: | :---------------: |
+| :zap: zap | 2900 ns/op | +0% | 5 allocs/op |
+| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op |
+| zerolog | 10639 ns/op | +267% | 32 allocs/op |
+| go-kit | 14434 ns/op | +398% | 59 allocs/op |
+| logrus | 17104 ns/op | +490% | 81 allocs/op |
+| apex/log | 32424 ns/op | +1018% | 66 allocs/op |
+| log15 | 33579 ns/op | +1058% | 76 allocs/op |
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :---------: | :-----------: | :---------------: |
+| :zap: zap | 373 ns/op | +0% | 0 allocs/op |
+| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op |
+| zerolog | 288 ns/op | -23% | 0 allocs/op |
+| go-kit | 11785 ns/op | +3060% | 58 allocs/op |
+| logrus | 19629 ns/op | +5162% | 70 allocs/op |
+| log15 | 21866 ns/op | +5762% | 72 allocs/op |
+| apex/log | 30890 ns/op | +8182% | 55 allocs/op |
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------------------ | :--------: | :-----------: | :---------------: |
+| :zap: zap | 381 ns/op | +0% | 0 allocs/op |
+| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op |
+| zerolog | 369 ns/op | -3% | 0 allocs/op |
+| standard library | 385 ns/op | +1% | 2 allocs/op |
+| go-kit | 606 ns/op | +59% | 11 allocs/op |
+| logrus | 1730 ns/op | +354% | 25 allocs/op |
+| apex/log | 1998 ns/op | +424% | 7 allocs/op |
+| log15 | 4546 ns/op | +1093% | 22 allocs/op |
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
new file mode 100644
index 0000000000..5be3704a3e
--- /dev/null
+++ b/vendor/go.uber.org/zap/array.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
+func Array(key string, val zapcore.ArrayMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) Field {
+ return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) Field {
+ return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) Field {
+ return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) Field {
+ return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) Field {
+ return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) Field {
+ return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) Field {
+ return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) Field {
+ return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) Field {
+ return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) Field {
+ return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) Field {
+ return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) Field {
+ return Array(key, int8s(nums))
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+ return Array(key, stringArray(ss))
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+ return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+ return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+ return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+ return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+ return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+ return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+ return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field {
+ return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bs {
+ arr.AppendBool(bs[i])
+ }
+ return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bss {
+ arr.AppendByteString(bss[i])
+ }
+ return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex128(nums[i])
+ }
+ return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex64(nums[i])
+ }
+ return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ds {
+ arr.AppendDuration(ds[i])
+ }
+ return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat64(nums[i])
+ }
+ return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat32(nums[i])
+ }
+ return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt(nums[i])
+ }
+ return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt64(nums[i])
+ }
+ return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt32(nums[i])
+ }
+ return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt16(nums[i])
+ }
+ return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt8(nums[i])
+ }
+ return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ss {
+ arr.AppendString(ss[i])
+ }
+ return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ts {
+ arr.AppendTime(ts[i])
+ }
+ return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint(nums[i])
+ }
+ return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint64(nums[i])
+ }
+ return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint32(nums[i])
+ }
+ return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint16(nums[i])
+ }
+ return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint8(nums[i])
+ }
+ return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUintptr(nums[i])
+ }
+ return nil
+}
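
The constructors above all funnel into `Array` with a lazily marshaled wrapper type. A minimal usage sketch, separate from the vendored file, with illustrative field values:

```go
package main

import (
	"errors"
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// Each slice is wrapped in an ArrayMarshaler and encoded lazily,
	// only when the entry is actually written.
	logger.Info("batch finished",
		zap.Strings("hosts", []string{"a.example", "b.example"}),
		zap.Ints("ports", []int{8080, 8443}),
		zap.Durations("latencies", []time.Duration{12 * time.Millisecond, 40 * time.Millisecond}),
		zap.Errors("failures", []error{errors.New("timeout")}),
	)
}
```
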
diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go
new file mode 100644
index 0000000000..d0d2c49d69
--- /dev/null
+++ b/vendor/go.uber.org/zap/array_go118.go
@@ -0,0 +1,156 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18
+// +build go1.18
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, which we have
+ // to convert "*T" to explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
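
The generic `Objects`, `ObjectValues`, and `Stringers` constructors differ only in where the marshaling method lives. A short sketch, not part of the vendored file, with hypothetical `Author` and `Request` types that follow the receiver rules described in the doc comments above:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author marshals on the value receiver, so []Author works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request marshals on the pointer receiver, so []Request works with
// zap.ObjectValues (and []*Request would work with zap.Objects).
type Request struct{ URL string }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.URL)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	logger.Info("loading article",
		zap.Objects("authors", []Author{{Name: "alice"}}))
	logger.Info("sending requests",
		zap.ObjectValues("requests", []Request{{URL: "http://example.com"}}))
}
```
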
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000000..9e929cd98e
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+ "strconv"
+ "time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+ bs []byte
+ pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+ b.bs = append(b.bs, v)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+ b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+ b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+ b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+ b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+ b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+ b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+ return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+ return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+ return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+ return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+ b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+ b.bs = append(b.bs, bs...)
+ return len(bs), nil
+}
+
+// WriteByte writes a single byte to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteByte(v byte) error {
+ b.AppendByte(v)
+ return nil
+}
+
+// WriteString writes a string to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteString(s string) (int, error) {
+ b.AppendString(s)
+ return len(s), nil
+}
+
+// TrimNewline trims any final "\n" byte from the end of the buffer.
+func (b *Buffer) TrimNewline() {
+ if i := len(b.bs) - 1; i >= 0 {
+ if b.bs[i] == '\n' {
+ b.bs = b.bs[:i]
+ }
+ }
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+ b.pool.put(b)
+}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
new file mode 100644
index 0000000000..8fb3e202cf
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import "sync"
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+ p *sync.Pool
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+ return Pool{p: &sync.Pool{
+ New: func() interface{} {
+ return &Buffer{bs: make([]byte, 0, _size)}
+ },
+ }}
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+ buf := p.p.Get().(*Buffer)
+ buf.Reset()
+ buf.pool = p
+ return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+ p.p.Put(buf)
+}
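
Putting `Buffer` and `Pool` together: a small sketch, separate from the vendored sources, of the intended get/append/free cycle; the appended values are illustrative only:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	pool := buffer.NewPool()

	// Get hands out a reset *Buffer (1 KiB backing array by default);
	// Free must return it to the pool once the caller is done with it.
	buf := pool.Get()
	defer buf.Free()

	buf.AppendString("status=")
	buf.AppendInt(200)
	buf.AppendByte(' ')
	buf.AppendBool(true)

	fmt.Println(buf.String(), buf.Len())
}
```
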
diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh
new file mode 100644
index 0000000000..345ac8b89a
--- /dev/null
+++ b/vendor/go.uber.org/zap/checklicense.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
new file mode 100644
index 0000000000..ee6096766a
--- /dev/null
+++ b/vendor/go.uber.org/zap/config.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "sort"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// If specified, the Sampler will invoke the Hook after each decision.
+//
+// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
+// details.
+type SamplingConfig struct {
+ Initial int `json:"initial" yaml:"initial"`
+ Thereafter int `json:"thereafter" yaml:"thereafter"`
+ Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+ // Level is the minimum enabled logging level. Note that this is a dynamic
+ // level, so calling Config.Level.SetLevel will atomically change the log
+ // level of all loggers descended from this config.
+ Level AtomicLevel `json:"level" yaml:"level"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development bool `json:"development" yaml:"development"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+ // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+ // EncoderConfig sets options for the chosen encoder. See
+ // zapcore.EncoderConfig for details.
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+ // OutputPaths is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+ // ErrorOutputPaths is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+ // InitialFields is a collection of fields to add to the root logger.
+ InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewProductionConfig is a reasonable production logging configuration.
+// Logging is enabled at InfoLevel and above.
+//
+// It uses a JSON encoder, writes to standard error, and enables sampling.
+// Stacktraces are automatically included on logs of ErrorLevel and above.
+func NewProductionConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(InfoLevel),
+ Development: false,
+ Sampling: &SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: NewProductionEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ // Keys can be anything except the empty string.
+ TimeKey: "T",
+ LevelKey: "L",
+ NameKey: "N",
+ CallerKey: "C",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "M",
+ StacktraceKey: "S",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.CapitalLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewDevelopmentConfig is a reasonable development logging configuration.
+// Logging is enabled at DebugLevel and above.
+//
+// It enables development mode (which makes DPanicLevel logs panic), uses a
+// console encoder, writes to standard error, and disables sampling.
+// Stacktraces are automatically included on logs of WarnLevel and above.
+func NewDevelopmentConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(DebugLevel),
+ Development: true,
+ Encoding: "console",
+ EncoderConfig: NewDevelopmentEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// Build constructs a logger from the Config and Options.
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+ enc, err := cfg.buildEncoder()
+ if err != nil {
+ return nil, err
+ }
+
+ sink, errSink, err := cfg.openSinks()
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Level == (AtomicLevel{}) {
+ return nil, errors.New("missing Level")
+ }
+
+ log := New(
+ zapcore.NewCore(enc, sink, cfg.Level),
+ cfg.buildOptions(errSink)...,
+ )
+ if len(opts) > 0 {
+ log = log.WithOptions(opts...)
+ }
+ return log, nil
+}
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+ opts := []Option{ErrorOutput(errSink)}
+
+ if cfg.Development {
+ opts = append(opts, Development())
+ }
+
+ if !cfg.DisableCaller {
+ opts = append(opts, AddCaller())
+ }
+
+ stackLevel := ErrorLevel
+ if cfg.Development {
+ stackLevel = WarnLevel
+ }
+ if !cfg.DisableStacktrace {
+ opts = append(opts, AddStacktrace(stackLevel))
+ }
+
+ if scfg := cfg.Sampling; scfg != nil {
+ opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+ var samplerOpts []zapcore.SamplerOption
+ if scfg.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
+ }
+ return zapcore.NewSamplerWithOptions(
+ core,
+ time.Second,
+ cfg.Sampling.Initial,
+ cfg.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ }))
+ }
+
+ if len(cfg.InitialFields) > 0 {
+ fs := make([]Field, 0, len(cfg.InitialFields))
+ keys := make([]string, 0, len(cfg.InitialFields))
+ for k := range cfg.InitialFields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fs = append(fs, Any(k, cfg.InitialFields[k]))
+ }
+ opts = append(opts, Fields(fs...))
+ }
+
+ return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+ sink, closeOut, err := Open(cfg.OutputPaths...)
+ if err != nil {
+ return nil, nil, err
+ }
+ errSink, _, err := Open(cfg.ErrorOutputPaths...)
+ if err != nil {
+ closeOut()
+ return nil, nil, err
+ }
+ return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+ return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
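
A sketch, outside the vendored file, of building a logger from a hand-written `Config` rather than one of the presets; the extra output path and the `service` field are placeholders:

```go
package main

import "go.uber.org/zap"

func main() {
	cfg := zap.Config{
		Level:       zap.NewAtomicLevelAt(zap.InfoLevel),
		Development: false,
		Sampling: &zap.SamplingConfig{ // per second: first 100 entries, then every 100th
			Initial:    100,
			Thereafter: 100,
		},
		Encoding:         "json",
		EncoderConfig:    zap.NewProductionEncoderConfig(),
		OutputPaths:      []string{"stderr", "/tmp/service.log"}, // placeholder path
		ErrorOutputPaths: []string{"stderr"},
		InitialFields:    map[string]interface{}{"service": "example"}, // placeholder field
	}

	logger, err := cfg.Build()
	if err != nil {
		panic(err) // e.g. unregistered encoding or missing Level
	}
	defer logger.Sync()

	logger.Info("configured logger ready")
}
```
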
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
new file mode 100644
index 0000000000..3c50d7b4d3
--- /dev/null
+++ b/vendor/go.uber.org/zap/doc.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// # Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// # Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// # Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
new file mode 100644
index 0000000000..caa04ceefd
--- /dev/null
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+ _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+ "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewConsoleEncoder(encoderConfig), nil
+ },
+ "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewJSONEncoder(encoderConfig), nil
+ },
+ }
+ _encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+ _encoderMutex.Lock()
+ defer _encoderMutex.Unlock()
+ if name == "" {
+ return errNoEncoderNameSpecified
+ }
+ if _, ok := _encoderNameToConstructor[name]; ok {
+ return fmt.Errorf("encoder already registered for name %q", name)
+ }
+ _encoderNameToConstructor[name] = constructor
+ return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
+ }
+
+ _encoderMutex.RLock()
+ defer _encoderMutex.RUnlock()
+ if name == "" {
+ return nil, errNoEncoderNameSpecified
+ }
+ constructor, ok := _encoderNameToConstructor[name]
+ if !ok {
+ return nil, fmt.Errorf("no encoder registered for name %q", name)
+ }
+ return constructor(encoderConfig)
+}
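
A sketch, not part of the vendored file, of registering an encoder under a hypothetical name ("myjson") and selecting it through `Config.Encoding`; it simply reuses the built-in JSON encoder to stay within the APIs shown above:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "myjson" is a hypothetical name; the constructor reuses the built-in
	// JSON encoder, but any zapcore.Encoder implementation could be returned.
	if err := zap.RegisterEncoder("myjson", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
		return zapcore.NewJSONEncoder(cfg), nil
	}); err != nil {
		panic(err) // duplicate or empty encoder name
	}

	cfg := zap.NewProductionConfig()
	cfg.Encoding = "myjson" // resolved via the registry in newEncoder

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("logging through the registered encoder")
}
```
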
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
new file mode 100644
index 0000000000..65982a51e5
--- /dev/null
+++ b/vendor/go.uber.org/zap/error.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) Field {
+ return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
+func NamedError(key string, err error) Field {
+ if err == nil {
+ return Skip()
+ }
+ return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+ // To represent each error as an object with an "error" attribute and
+ // potentially an "errorVerbose" attribute, we need to wrap it in a
+ // type that implements LogObjectMarshaler. To prevent this from
+ // allocating, pool the wrapper type.
+ elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem.error = errs[i]
+ arr.AppendObject(elem)
+ elem.error = nil
+ _errArrayElemPool.Put(elem)
+ }
+ return nil
+}
+
+type errArrayElem struct {
+ error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ // Re-use the error field's logic, which supports non-standard error types.
+ Error(e.error).AddTo(enc)
+ return nil
+}
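
A short sketch, separate from the vendored file, of the error field helpers; the error messages are illustrative:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	err := errors.New("connection refused")

	// Error is shorthand for NamedError("error", err); both skip nil errors.
	logger.Warn("dial failed", zap.Error(err))
	logger.Warn("dial failed", zap.NamedError("dialError", err))

	// Errors (from array.go) records a slice of errors under a single key.
	logger.Warn("all retries failed",
		zap.Errors("attempts", []error{err, errors.New("timeout")}))
}
```
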
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
new file mode 100644
index 0000000000..bbb745db5b
--- /dev/null
+++ b/vendor/go.uber.org/zap/field.go
@@ -0,0 +1,549 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for zapcore.Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+var (
+ _minTimeInt64 = time.Unix(0, math.MinInt64)
+ _maxTimeInt64 = time.Unix(0, math.MaxInt64)
+)
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+ return Field{Type: zapcore.SkipType}
+}
+
+// nilField returns a field which will marshal explicitly as nil. See motivation
+// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
+// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
+// implementation here should be changed to reflect that.
+func nilField(key string) Field { return Reflect(key, nil) }
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+ var ival int64
+ if val {
+ ival = 1
+ }
+ return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// Boolp constructs a field that carries a *bool. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Boolp(key string, val *bool) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Bool(key, *val)
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+ return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex128p constructs a field that carries a *complex128. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex128p(key string, val *complex128) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex128(key, *val)
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+ return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Complex64p constructs a field that carries a *complex64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex64p(key string, val *complex64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex64(key, *val)
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+ return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float64p constructs a field that carries a *float64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float64p(key string, val *float64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float64(key, *val)
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field {
+ return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Float32p constructs a field that carries a *float32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float32p(key string, val *float32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float32(key, *val)
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) Field {
+ return Int64(key, int64(val))
+}
+
+// Intp constructs a field that carries a *int. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Intp(key string, val *int) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int(key, *val)
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) Field {
+ return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int64p constructs a field that carries a *int64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int64p(key string, val *int64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int64(key, *val)
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) Field {
+ return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int32p constructs a field that carries a *int32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int32p(key string, val *int32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int32(key, *val)
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) Field {
+ return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int16p constructs a field that carries a *int16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int16p(key string, val *int16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int16(key, *val)
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) Field {
+ return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// Int8p constructs a field that carries a *int8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int8p(key string, val *int8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int8(key, *val)
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) Field {
+ return Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Stringp constructs a field that carries a *string. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Stringp(key string, val *string) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return String(key, *val)
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) Field {
+ return Uint64(key, uint64(val))
+}
+
+// Uintp constructs a field that carries a *uint. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintp(key string, val *uint) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint(key, *val)
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) Field {
+ return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint64p constructs a field that carries a *uint64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint64p(key string, val *uint64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint64(key, *val)
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) Field {
+ return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint32p constructs a field that carries a *uint32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint32p(key string, val *uint32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint32(key, *val)
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) Field {
+ return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint16p constructs a field that carries a *uint16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint16p(key string, val *uint16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint16(key, *val)
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) Field {
+ return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uint8p constructs a field that carries a *uint8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint8p(key string, val *uint8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint8(key, *val)
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) Field {
+ return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintptrp(key string, val *uintptr) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uintptr(key, *val)
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) Field {
+ return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
+func Namespace(key string) Field {
+ return Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) Field {
+ return Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) Field {
+ if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
+ return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
+ }
+ return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Timep constructs a field that carries a *time.Time. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Timep(key string, val *time.Time) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Time(key, *val)
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) Field {
+ return StackSkip(key, 1) // skip Stack
+}
+
+// StackSkip constructs a field similarly to Stack, but also skips the given
+// number of frames from the top of the stacktrace.
+func StackSkip(key string, skip int) Field {
+ // Returning the stacktrace as a string costs an allocation, but saves us
+ // from expanding the zapcore.Field union struct to include a byte slice. Since
+ // taking a stacktrace is already so expensive (~10us), the extra allocation
+ // is okay.
+ return String(key, takeStacktrace(skip+1)) // skip StackSkip
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) Field {
+ return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Durationp(key string, val *time.Duration) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Duration(key, *val)
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Inline constructs a Field that is similar to Object, but it
+// will add the elements of the provided ObjectMarshaler to the
+// current namespace.
+func Inline(val zapcore.ObjectMarshaler) Field {
+ return zapcore.Field{
+ Type: zapcore.InlineMarshalerType,
+ Interface: val,
+ }
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprises, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
+func Any(key string, value interface{}) Field {
+ switch val := value.(type) {
+ case zapcore.ObjectMarshaler:
+ return Object(key, val)
+ case zapcore.ArrayMarshaler:
+ return Array(key, val)
+ case bool:
+ return Bool(key, val)
+ case *bool:
+ return Boolp(key, val)
+ case []bool:
+ return Bools(key, val)
+ case complex128:
+ return Complex128(key, val)
+ case *complex128:
+ return Complex128p(key, val)
+ case []complex128:
+ return Complex128s(key, val)
+ case complex64:
+ return Complex64(key, val)
+ case *complex64:
+ return Complex64p(key, val)
+ case []complex64:
+ return Complex64s(key, val)
+ case float64:
+ return Float64(key, val)
+ case *float64:
+ return Float64p(key, val)
+ case []float64:
+ return Float64s(key, val)
+ case float32:
+ return Float32(key, val)
+ case *float32:
+ return Float32p(key, val)
+ case []float32:
+ return Float32s(key, val)
+ case int:
+ return Int(key, val)
+ case *int:
+ return Intp(key, val)
+ case []int:
+ return Ints(key, val)
+ case int64:
+ return Int64(key, val)
+ case *int64:
+ return Int64p(key, val)
+ case []int64:
+ return Int64s(key, val)
+ case int32:
+ return Int32(key, val)
+ case *int32:
+ return Int32p(key, val)
+ case []int32:
+ return Int32s(key, val)
+ case int16:
+ return Int16(key, val)
+ case *int16:
+ return Int16p(key, val)
+ case []int16:
+ return Int16s(key, val)
+ case int8:
+ return Int8(key, val)
+ case *int8:
+ return Int8p(key, val)
+ case []int8:
+ return Int8s(key, val)
+ case string:
+ return String(key, val)
+ case *string:
+ return Stringp(key, val)
+ case []string:
+ return Strings(key, val)
+ case uint:
+ return Uint(key, val)
+ case *uint:
+ return Uintp(key, val)
+ case []uint:
+ return Uints(key, val)
+ case uint64:
+ return Uint64(key, val)
+ case *uint64:
+ return Uint64p(key, val)
+ case []uint64:
+ return Uint64s(key, val)
+ case uint32:
+ return Uint32(key, val)
+ case *uint32:
+ return Uint32p(key, val)
+ case []uint32:
+ return Uint32s(key, val)
+ case uint16:
+ return Uint16(key, val)
+ case *uint16:
+ return Uint16p(key, val)
+ case []uint16:
+ return Uint16s(key, val)
+ case uint8:
+ return Uint8(key, val)
+ case *uint8:
+ return Uint8p(key, val)
+ case []byte:
+ return Binary(key, val)
+ case uintptr:
+ return Uintptr(key, val)
+ case *uintptr:
+ return Uintptrp(key, val)
+ case []uintptr:
+ return Uintptrs(key, val)
+ case time.Time:
+ return Time(key, val)
+ case *time.Time:
+ return Timep(key, val)
+ case []time.Time:
+ return Times(key, val)
+ case time.Duration:
+ return Duration(key, val)
+ case *time.Duration:
+ return Durationp(key, val)
+ case []time.Duration:
+ return Durations(key, val)
+ case error:
+ return NamedError(key, val)
+ case []error:
+ return Errors(key, val)
+ case fmt.Stringer:
+ return Stringer(key, val)
+ default:
+ return Reflect(key, val)
+ }
+}
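Not part of the patch, but for orientation: a minimal sketch of how the field constructors vendored above are typically used. The image name, field keys, and values are illustrative assumptions, not anything this repository does.

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	labels := map[string]string{"arch": "amd64"} // hypothetical metadata

	logger.Info("copied image",
		zap.String("image", "quay.io/example/app:latest"), // illustrative reference
		zap.Int("layers", 7),
		zap.Duration("elapsed", 1500*time.Millisecond),
		// Any picks the strongly typed constructor when it can and only
		// falls back to Reflect for types it doesn't recognize, such as maps.
		zap.Any("labels", labels),
	)
}
```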
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
new file mode 100644
index 0000000000..1312875072
--- /dev/null
+++ b/vendor/go.uber.org/zap/flag.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "flag"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+ lvl := defaultLevel
+ flag.Var(&lvl, name, usage)
+ return &lvl
+}
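A sketch of wiring LevelFlag into a binary's flag parsing; the `--log-level` flag name and the config plumbing are assumptions made for illustration.

```go
package main

import (
	"flag"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Registers a --log-level flag on the global flag set (name is illustrative).
	level := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")
	flag.Parse()

	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(*level)

	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Info("logger configured", zap.Stringer("level", level))
}
```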
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
new file mode 100644
index 0000000000..8e1d05e9ab
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.yaml
@@ -0,0 +1,34 @@
+package: go.uber.org/zap
+license: MIT
+import:
+- package: go.uber.org/atomic
+ version: ^1
+- package: go.uber.org/multierr
+ version: ^1
+testImport:
+- package: github.com/satori/go.uuid
+- package: github.com/sirupsen/logrus
+- package: github.com/apex/log
+ subpackages:
+ - handlers/json
+- package: github.com/go-kit/kit
+ subpackages:
+ - log
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+- package: gopkg.in/inconshreveable/log15.v2
+- package: github.com/mattn/goveralls
+- package: github.com/pborman/uuid
+- package: github.com/pkg/errors
+- package: github.com/rs/zerolog
+- package: golang.org/x/tools
+ subpackages:
+ - cover
+- package: golang.org/x/lint
+ subpackages:
+ - golint
+- package: github.com/axw/gocov
+ subpackages:
+ - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 0000000000..3cb46c9e0a
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,169 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ _stdLogDefaultDepth = 1
+ _loggerWriterDepth = 2
+ _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+ "https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+ _globalMu sync.RWMutex
+ _globalL = NewNop()
+ _globalS = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+ _globalMu.RLock()
+ l := _globalL
+ _globalMu.RUnlock()
+ return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+ _globalMu.RLock()
+ s := _globalS
+ _globalMu.RUnlock()
+ return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
+func ReplaceGlobals(logger *Logger) func() {
+ _globalMu.Lock()
+ prev := _globalL
+ _globalL = logger
+ _globalS = logger.Sugar()
+ _globalMu.Unlock()
+ return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
+func NewStdLog(l *Logger) *log.Logger {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ f := logger.Info
+ return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap Logger
+// at the specified level.

+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLog(l *Logger) func() {
+ f, err := redirectStdLogAt(l, InfoLevel)
+ if err != nil {
+ // Can't get here, since passing InfoLevel to redirectStdLogAt always
+ // works.
+ panic(fmt.Sprintf(_programmerErrorTemplate, err))
+ }
+ return f
+}
+
+// RedirectStdLogAt redirects output from the standard library's package-global
+// logger to the supplied logger at the specified level. Since zap already
+// handles caller annotations, timestamps, etc., it automatically disables the
+// standard library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ return redirectStdLogAt(l, level)
+}
+
+func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ flags := log.Flags()
+ prefix := log.Prefix()
+ log.SetFlags(0)
+ log.SetPrefix("")
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ log.SetOutput(&loggerWriter{logFunc})
+ return func() {
+ log.SetFlags(flags)
+ log.SetPrefix(prefix)
+ log.SetOutput(os.Stderr)
+ }, nil
+}
+
+func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
+ switch lvl {
+ case DebugLevel:
+ return logger.Debug, nil
+ case InfoLevel:
+ return logger.Info, nil
+ case WarnLevel:
+ return logger.Warn, nil
+ case ErrorLevel:
+ return logger.Error, nil
+ case DPanicLevel:
+ return logger.DPanic, nil
+ case PanicLevel:
+ return logger.Panic, nil
+ case FatalLevel:
+ return logger.Fatal, nil
+ }
+ return nil, fmt.Errorf("unrecognized level: %q", lvl)
+}
+
+type loggerWriter struct {
+ logFunc func(msg string, fields ...Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+ p = bytes.TrimSpace(p)
+ l.logFunc(string(p))
+ return len(p), nil
+}
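For context, a minimal sketch of the globals and standard-library redirection that global.go provides; nothing here is specific to this repository.

```go
package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	logger := zap.Must(zap.NewDevelopment())
	defer logger.Sync()

	// Swap in the process-wide logger and keep the function that restores it.
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	zap.L().Info("via the global Logger")
	zap.S().Infow("via the global SugaredLogger", "key", "value")

	// Route the standard library's package-global logger through zap as well.
	restore := zap.RedirectStdLog(logger)
	defer restore()
	log.Print("this line is emitted by zap at InfoLevel")
}
```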
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 0000000000..632b6831a8
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP is a simple JSON endpoint that can report on or change the current
+// logging level.
+//
+// # GET
+//
+// The GET request returns a JSON description of the current logging level like:
+//
+// {"level":"info"}
+//
+// # PUT
+//
+// The PUT request changes the logging level. It is perfectly safe to change the
+// logging level while a program is running. Two content types are supported:
+//
+// Content-Type: application/x-www-form-urlencoded
+//
+// With this content type, the level can be provided through the request body or
+// a query parameter. The log level is URL encoded like:
+//
+// level=debug
+//
+// The request body takes precedence over the query parameter, if both are
+// specified.
+//
+// This content type is the default for a curl PUT request. Following are two
+// example curl requests that both set the logging level to debug.
+//
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
+//
+// For any other content type, the payload is expected to be JSON encoded and
+// look like:
+//
+// {"level":"info"}
+//
+// An example curl request could look like this:
+//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ type errorResponse struct {
+ Error string `json:"error"`
+ }
+ type payload struct {
+ Level zapcore.Level `json:"level"`
+ }
+
+ enc := json.NewEncoder(w)
+
+ switch r.Method {
+ case http.MethodGet:
+ enc.Encode(payload{Level: lvl.Level()})
+ case http.MethodPut:
+ requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ enc.Encode(errorResponse{Error: err.Error()})
+ return
+ }
+ lvl.SetLevel(requestedLvl)
+ enc.Encode(payload{Level: lvl.Level()})
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ enc.Encode(errorResponse{
+ Error: "Only GET and PUT are supported.",
+ })
+ }
+}
+
+// Decodes incoming PUT requests and returns the requested logging level.
+func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
+ if contentType == "application/x-www-form-urlencoded" {
+ return decodePutURL(r)
+ }
+ return decodePutJSON(r.Body)
+}
+
+func decodePutURL(r *http.Request) (zapcore.Level, error) {
+ lvl := r.FormValue("level")
+ if lvl == "" {
+ return 0, errors.New("must specify logging level")
+ }
+ var l zapcore.Level
+ if err := l.UnmarshalText([]byte(lvl)); err != nil {
+ return 0, err
+ }
+ return l, nil
+}
+
+func decodePutJSON(body io.Reader) (zapcore.Level, error) {
+ var pld struct {
+ Level *zapcore.Level `json:"level"`
+ }
+ if err := json.NewDecoder(body).Decode(&pld); err != nil {
+ return 0, fmt.Errorf("malformed request body: %v", err)
+ }
+ if pld.Level == nil {
+ return 0, errors.New("must specify logging level")
+ }
+ return *pld.Level, nil
+
+}
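A sketch of exposing the AtomicLevel HTTP handler documented above; the /log/level path and listen address are assumptions made for illustration.

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	cfg := zap.NewProductionConfig()
	cfg.Level = atom
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	// AtomicLevel implements http.Handler: GET reports the level, PUT changes it.
	http.Handle("/log/level", atom)
	logger.Info("level endpoint listening", zap.String("addr", ":8080"))
	if err := http.ListenAndServe(":8080", nil); err != nil {
		logger.Fatal("http server exited", zap.Error(err))
	}
}
```

With this in place, a request such as `curl -X PUT localhost:8080/log/level -d level=debug` flips the same AtomicLevel the logger consults.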
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 0000000000..dad583aaa5
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffers.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+ _pool = buffer.NewPool()
+ // Get retrieves a buffer from the pool, creating one if necessary.
+ Get = _pool.Get
+)
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
new file mode 100644
index 0000000000..c4d5d02abc
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/color.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+ Black Color = iota + 30
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+ return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
new file mode 100644
index 0000000000..f673f9947b
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var _exit = os.Exit
+
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+ Exited bool
+ Code int
+ prev func(code int)
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
+ return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
+func WithStub(f func()) *StubbedExit {
+ s := Stub()
+ defer s.Unstub()
+ f()
+ return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+ _exit = se.prev
+}
+
+func (se *StubbedExit) exit(code int) {
+ se.Exited = true
+ se.Code = code
+}
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
new file mode 100644
index 0000000000..5f3e3f1b92
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package internal
+
+import "go.uber.org/zap/zapcore"
+
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined here so that it can be used conveniently in tests
+// and in non-zapcore packages.
+// It cannot live in zapcore itself because that would create a cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000000..db951e19a5
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "go.uber.org/atomic"
+ "go.uber.org/zap/internal"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel = zapcore.DebugLevel
+ // InfoLevel is the default logging priority.
+ InfoLevel = zapcore.InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel = zapcore.WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel = zapcore.ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel = zapcore.DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel = zapcore.PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+ l *atomic.Int32
+}
+
+var _ internal.LeveledEnabler = AtomicLevel{}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+ return AtomicLevel{
+ l: atomic.NewInt32(int32(InfoLevel)),
+ }
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+ a := NewAtomicLevel()
+ a.SetLevel(l)
+ return a
+}
+
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+ return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+ return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+ lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+ return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+ if lvl.l == nil {
+ lvl.l = &atomic.Int32{}
+ }
+
+ var l zapcore.Level
+ if err := l.UnmarshalText(text); err != nil {
+ return err
+ }
+
+ lvl.SetLevel(l)
+ return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+ return lvl.Level().MarshalText()
+}
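A small sketch of AtomicLevel in practice, assuming the level string comes from configuration rather than being hard-coded.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "debug" stands in for a value read from configuration.
	atom, err := zap.ParseAtomicLevel("debug")
	if err != nil {
		panic(err)
	}

	cfg := zap.NewDevelopmentConfig()
	cfg.Level = atom
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Debug("visible while the level is debug")

	atom.SetLevel(zapcore.WarnLevel) // raise the threshold at runtime
	logger.Debug("suppressed after SetLevel")
	logger.Warn("still emitted")
}
```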
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 0000000000..cd44030d13
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,400 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+ core zapcore.Core
+
+ development bool
+ addCaller bool
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
+
+ name string
+ errorOutput zapcore.WriteSyncer
+
+ addStack zapcore.LevelEnabler
+
+ callerSkip int
+
+ clock zapcore.Clock
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+ if core == nil {
+ return NewNop()
+ }
+ log := &Logger{
+ core: core,
+ errorOutput: zapcore.Lock(os.Stderr),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+ return log.WithOptions(options...)
+}
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+ return &Logger{
+ core: zapcore.NewNopCore(),
+ errorOutput: zapcore.AddSync(io.Discard),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+ return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+ return NewDevelopmentConfig().Build(options...)
+}
+
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+ encoderCfg := zapcore.EncoderConfig{
+ MessageKey: "msg",
+ LevelKey: "level",
+ NameKey: "logger",
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ }
+ core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+ return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+ core := log.clone()
+ core.callerSkip += 2
+ return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+ if s == "" {
+ return log
+ }
+ l := log.clone()
+ if log.name == "" {
+ l.name = s
+ } else {
+ l.name = strings.Join([]string{l.name, s}, ".")
+ }
+ return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+ c := log.clone()
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+ return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ l := log.clone()
+ l.core = l.core.With(fields)
+ return l
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ return log.check(lvl, msg)
+}
+
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+ if ce := log.check(DebugLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+ if ce := log.check(InfoLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+ if ce := log.check(WarnLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+ if ce := log.check(ErrorLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+ if ce := log.check(DPanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+ if ce := log.check(PanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+ if ce := log.check(FatalLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+ return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+ return log.core
+}
+
+func (log *Logger) clone() *Logger {
+ copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
+ const callerSkipOffset = 2
+
+ // Check the level first to reduce the cost of disabled log calls.
+ // Since Panic and higher may exit, we skip the optimization for those levels.
+ if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+ return nil
+ }
+
+ // Create basic checked entry thru the core; this will be non-nil if the
+ // log message will actually be written somewhere.
+ ent := zapcore.Entry{
+ LoggerName: log.name,
+ Time: log.clock.Now(),
+ Level: lvl,
+ Message: msg,
+ }
+ ce := log.core.Check(ent, nil)
+ willWrite := ce != nil
+
+ // Set up any required terminal behavior.
+ switch ent.Level {
+ case zapcore.PanicLevel:
+ ce = ce.After(ent, zapcore.WriteThenPanic)
+ case zapcore.FatalLevel:
+ onFatal := log.onFatal
+ // nil or WriteThenNoop will lead to continued execution after
+ // a Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the
+ // log.Fatal.
+ if onFatal == nil || onFatal == zapcore.WriteThenNoop {
+ onFatal = zapcore.WriteThenFatal
+ }
+ ce = ce.After(ent, onFatal)
+ case zapcore.DPanicLevel:
+ if log.development {
+ ce = ce.After(ent, zapcore.WriteThenPanic)
+ }
+ }
+
+ // Only do further annotation if we're going to write this message; checked
+ // entries that exist only for terminal behavior don't benefit from
+ // annotation.
+ if !willWrite {
+ return ce
+ }
+
+ // Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput
+
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktraceFirst
+ if addStack {
+ stackDepth = stacktraceFull
+ }
+ stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ log.errorOutput.Sync()
+ }
+ return ce
+ }
+
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
+ PC: frame.PC,
+ File: frame.File,
+ Line: frame.Line,
+ Function: frame.Function,
+ }
+ }
+
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := newStackFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
+ }
+
+ return ce
+}
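A sketch of building a Logger from an explicit core rather than a preset, and of the Check optimization described in the code above; the encoder and level choices are illustrative.

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.InfoLevel)

	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
	defer logger.Sync()

	// Check avoids building fields for a level that is disabled.
	if ce := logger.Check(zap.DebugLevel, "expensive debug detail"); ce != nil {
		ce.Write(zap.Int("value", 42))
	}

	logger.Named("example").Info("ready", zap.String("component", "demo"))
}
```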
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 0000000000..c4f3bca3d2
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,167 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// An Option configures a Logger.
+type Option interface {
+ apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = f(log.core)
+ })
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = zapcore.RegisterHooks(log.core, hooks...)
+ })
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = log.core.With(fs)
+ })
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+ return optionFunc(func(log *Logger) {
+ log.errorOutput = w
+ })
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+ return optionFunc(func(log *Logger) {
+ log.development = true
+ })
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+ return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+ return optionFunc(func(log *Logger) {
+ log.addCaller = enabled
+ })
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+ return optionFunc(func(log *Logger) {
+ log.callerSkip += skip
+ })
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ log.addStack = lvl
+ })
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed-in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+ if err != nil {
+ fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+ } else {
+ log.core = core
+ }
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = hook
+ })
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+ return optionFunc(func(log *Logger) {
+ log.clock = clock
+ })
+}
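A sketch of how a few of these Options compose; deriving a quieter child logger with an extra field is an assumption for illustration, not something the patch itself does.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.Must(zap.NewProduction())
	defer base.Sync()

	// WithOptions clones the logger, so base keeps its original behavior.
	quiet := base.WithOptions(
		zap.IncreaseLevel(zapcore.WarnLevel),        // drop Info and below
		zap.Fields(zap.String("subsystem", "copy")), // hypothetical field
	)

	quiet.Info("filtered out by IncreaseLevel")
	quiet.Warn("still written, with the subsystem field attached")
}
```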
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000000..478c9a10ff
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var _sinkRegistry = newSinkRegistry()
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+ zapcore.WriteSyncer
+ io.Closer
+}
+
+type errSinkNotFound struct {
+ scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+ return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specified scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
+
+ if scheme == "" {
+ return errors.New("can't register a sink factory for empty string")
+ }
+ normalized, err := normalizeScheme(scheme)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+ }
+ if _, ok := sr.factories[normalized]; ok {
+ return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+ }
+ sr.factories[normalized] = factory
+ return nil
+}
+
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+ }
+ if u.Scheme == "" {
+ u.Scheme = schemeFile
+ }
+
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
+ if !ok {
+ return nil, &errSinkNotFound{u.Scheme}
+ }
+ return factory(u)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
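+
+// For illustration, a minimal sketch of registering a custom scheme; the
+// memorySink type and its constructor are hypothetical and must satisfy Sink
+// (Write, Sync, Close).
+//
+//	err := zap.RegisterSink("memory", func(u *url.URL) (zap.Sink, error) {
+//		return newMemorySink(u), nil // hypothetical in-memory Sink constructor
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	ws, closeFn, err := zap.Open("memory://buffer")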
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
+ if u.User != nil {
+ return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+ }
+ if u.Fragment != "" {
+ return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+ }
+ if u.RawQuery != "" {
+ return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+ }
+ // Error messages are better if we check hostname and port separately.
+ if u.Port() != "" {
+ return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+ }
+ if hn := u.Hostname(); hn != "" && hn != "localhost" {
+ return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+ }
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
+ case "stdout":
+ return nopCloserSink{os.Stdout}, nil
+ case "stderr":
+ return nopCloserSink{os.Stderr}, nil
+ }
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+}
+
+func normalizeScheme(s string) (string, error) {
+ // https://tools.ietf.org/html/rfc3986#section-3.1
+ s = strings.ToLower(s)
+ if first := s[0]; 'a' > first || 'z' < first {
+ return "", errors.New("must start with a letter")
+ }
+ for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+ c := s[i]
+ switch {
+ case 'a' <= c && c <= 'z':
+ continue
+ case '0' <= c && c <= '9':
+ continue
+ case c == '.' || c == '+' || c == '-':
+ continue
+ }
+ return "", fmt.Errorf("may not contain %q", c)
+ }
+ return s, nil
+}
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
new file mode 100644
index 0000000000..817a3bde8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "runtime"
+ "sync"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return &stacktrace{
+ storage: make([]uintptr, 64),
+ }
+ },
+}
+
+type stacktrace struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// stacktraceDepth specifies how deep of a stack trace should be captured.
+type stacktraceDepth int
+
+const (
+ // stacktraceFirst captures only the first frame.
+ stacktraceFirst stacktraceDepth = iota
+
+ // stacktraceFull captures the entire call stack, allocating more
+ // storage for it if needed.
+ stacktraceFull
+)
+
+// captureStacktrace captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// captureStacktrace.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
+ stack := _stacktracePool.Get().(*stacktrace)
+
+ switch depth {
+ case stacktraceFirst:
+ stack.pcs = stack.storage[:1]
+ case stacktraceFull:
+ stack.pcs = stack.storage
+ }
+
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip captureStacktrace and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
+
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == stacktraceFull {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
+ }
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
+ }
+
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *stacktrace) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stacktracePool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *stacktrace) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+func takeStacktrace(skip int) string {
+ stack := captureStacktrace(skip+1, stacktraceFull)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := newStackFormatter(buffer)
+ stackfmt.FormatStack(stack)
+ return buffer.String()
+}
+
+// stackFormatter formats a stack trace into a readable string representation.
+type stackFormatter struct {
+ b *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// newStackFormatter builds a new stackFormatter.
+func newStackFormatter(b *buffer.Buffer) stackFormatter {
+ return stackFormatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *stackFormatter) FormatStack(stack *stacktrace) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000000..ac387b3e47
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,416 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ _oddNumberErrMsg = "Ignored key without a value."
+ _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
+type SugaredLogger struct {
+ base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+ base := s.base.clone()
+ base.callerSkip -= 2
+ return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+ return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
+// is the equivalent of
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
+// Debug uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+ s.log(DebugLevel, "", args, nil)
+}
+
+// Info uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Info(args ...interface{}) {
+ s.log(InfoLevel, "", args, nil)
+}
+
+// Warn uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+ s.log(WarnLevel, "", args, nil)
+}
+
+// Error uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Error(args ...interface{}) {
+ s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic uses fmt.Sprint to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+ s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic uses fmt.Sprint to construct and log a message, then panics.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+ s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+ s.log(FatalLevel, "", args, nil)
+}
+
+// Debugf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+ s.log(DebugLevel, template, args, nil)
+}
+
+// Infof uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+ s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+ s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+ s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf uses fmt.Sprintf to log a templated message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+ s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf uses fmt.Sprintf to log a templated message, then panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+ s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+ s.log(FatalLevel, template, args, nil)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+//
+// s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+ s.log(DebugLevel, msg, nil, keysAndValues)
+}
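+
+// For illustration, a minimal sketch of the "w" form; sugar is assumed to be
+// an existing *SugaredLogger and url a local variable.
+//
+//	sugar.Debugw("fetched url",
+//		"url", url,
+//		"attempt", 3,
+//	)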
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+ s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+ s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+ s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+ s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+ s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+ s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Debugln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln uses fmt.Sprintln to construct and log a message.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln uses fmt.Sprintln to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln uses fmt.Sprintln to construct and log a message, then panics.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+ return s.base.Sync()
+}
+
+// log message with Sprint, Sprintf, or neither.
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+ // If logging at this level is completely disabled, skip the overhead of
+ // string formatting.
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessage(template, fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// getMessage formats with Sprint, Sprintf, or neither.
+func getMessage(template string, fmtArgs []interface{}) string {
+ if len(fmtArgs) == 0 {
+ return template
+ }
+
+ if template != "" {
+ return fmt.Sprintf(template, fmtArgs...)
+ }
+
+ if len(fmtArgs) == 1 {
+ if str, ok := fmtArgs[0].(string); ok {
+ return str
+ }
+ }
+ return fmt.Sprint(fmtArgs...)
+}
+
+// getMessageln formats with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
+ if len(args) == 0 {
+ return nil
+ }
+
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
+
+ for i := 0; i < len(args); {
+ // This is a strongly-typed field. Consume it and move on.
+ if f, ok := args[i].(Field); ok {
+ fields = append(fields, f)
+ i++
+ continue
+ }
+
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
+ // Make sure this element isn't a dangling key.
+ if i == len(args)-1 {
+ s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
+ break
+ }
+
+ // Consume this value and the next, treating them as a key-value pair. If the
+ // key isn't a string, add this pair to the slice of invalid pairs.
+ key, val := args[i], args[i+1]
+ if keyStr, ok := key.(string); !ok {
+ // Subsequent errors are likely, so allocate once up front.
+ if cap(invalid) == 0 {
+ invalid = make(invalidPairs, 0, len(args)/2)
+ }
+ invalid = append(invalid, invalidPair{i, key, val})
+ } else {
+ fields = append(fields, Any(keyStr, val))
+ }
+ i += 2
+ }
+
+ // If we encountered any invalid key-value pairs, log an error.
+ if len(invalid) > 0 {
+ s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
+ }
+ return fields
+}
+
+type invalidPair struct {
+ position int
+ key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddInt64("position", int64(p.position))
+ Any("key", p.key).AddTo(enc)
+ Any("value", p.value).AddTo(enc)
+ return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ var err error
+ for i := range ps {
+ err = multierr.Append(err, enc.AppendObject(ps[i]))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
new file mode 100644
index 0000000000..c5a1f16225
--- /dev/null
+++ b/vendor/go.uber.org/zap/time.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
+}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
new file mode 100644
index 0000000000..f08728e1ec
--- /dev/null
+++ b/vendor/go.uber.org/zap/writer.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of URLs, opens or
+// creates each of the specified resources, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
+// scheme and URLs with the "file" scheme. Third-party code may register
+// factories for other schemes using RegisterSink.
+//
+// URLs with the "file" scheme must use absolute paths on the local
+// filesystem. No user, password, port, fragments, or query parameters are
+// allowed, and the hostname must be empty or "localhost".
+//
+// Since it's common to write logs to the local filesystem, URLs without a
+// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
+// a scheme, the special paths "stdout" and "stderr" are interpreted as
+// os.Stdout and os.Stderr. When specified without a scheme, relative file
+// paths also work.
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+ writers, close, err := open(paths)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ writer := CombineWriteSyncers(writers...)
+ return writer, close, nil
+}
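+
+// For illustration, a minimal sketch of Open feeding a core; the paths shown
+// are placeholders.
+//
+//	ws, closeFn, err := zap.Open("stderr", "/var/log/app.log")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer closeFn()
+//	core := zapcore.NewCore(
+//		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+//		ws,
+//		zap.InfoLevel,
+//	)
+//	logger := zap.New(core)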
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+ writers := make([]zapcore.WriteSyncer, 0, len(paths))
+ closers := make([]io.Closer, 0, len(paths))
+ close := func() {
+ for _, c := range closers {
+ c.Close()
+ }
+ }
+
+ var openErr error
+ for _, path := range paths {
+ sink, err := _sinkRegistry.newSink(path)
+ if err != nil {
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
+ continue
+ }
+ writers = append(writers, sink)
+ closers = append(closers, sink)
+ }
+ if openErr != nil {
+ close()
+ return nil, nil, openErr
+ }
+
+ return writers, close, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+ if len(writers) == 0 {
+ return zapcore.AddSync(io.Discard)
+ }
+ return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 0000000000..a40e93b3ec
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,219 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+	// _defaultBufferSize specifies the default size used by BufferedWriteSyncer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+	// BufferedWriteSyncer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+	// Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer directly. Multiple Write calls are
+// batched, and the data is flushed to disk when the buffer is full or
+// periodically.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid partial writes from being flushed, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+			// Ignore the error here: the underlying bufio writer stores any
+			// write error, and it is returned by the Sync call performed
+			// during Stop.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000000..422fd82a6b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+ // NewTicker returns *time.Ticker that holds a channel
+ // that delivers "ticks" of a clock.
+ NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements the default Clock, using the system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000000..1aa5dc3646
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var _sliceEncoderPool = sync.Pool{
+ New: func() interface{} {
+ return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
+ },
+}
+
+func getSliceEncoder() *sliceArrayEncoder {
+ return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+ e.elems = e.elems[:0]
+ _sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+ *jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human -
+// rather than machine - consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+ if cfg.ConsoleSeparator == "" {
+ // Use a default delimiter of '\t' for backwards compatibility
+ cfg.ConsoleSeparator = "\t"
+ }
+ return consoleEncoder{newJSONEncoder(cfg, true)}
+}
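+
+// For illustration, a minimal sketch wiring the console encoder into a core
+// that writes human-readable output to stderr.
+//
+//	enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
+//	core := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.DebugLevel)
+//	logger := zap.New(core)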
+
+func (c consoleEncoder) Clone() Encoder {
+ return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ line := bufferpool.Get()
+
+ // We don't want the entry's metadata to be quoted and escaped (if it's
+ // encoded as strings), which means that we can't use the JSON encoder. The
+ // simplest option is to use the memory encoder and fmt.Fprint.
+ //
+ // If this ever becomes a performance bottleneck, we can implement
+ // ArrayEncoder for our plain-text format.
+ arr := getSliceEncoder()
+ if c.TimeKey != "" && c.EncodeTime != nil {
+ c.EncodeTime(ent.Time, arr)
+ }
+ if c.LevelKey != "" && c.EncodeLevel != nil {
+ c.EncodeLevel(ent.Level, arr)
+ }
+ if ent.LoggerName != "" && c.NameKey != "" {
+ nameEncoder := c.EncodeName
+
+ if nameEncoder == nil {
+ // Fall back to FullNameEncoder for backward compatibility.
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, arr)
+ }
+ if ent.Caller.Defined {
+ if c.CallerKey != "" && c.EncodeCaller != nil {
+ c.EncodeCaller(ent.Caller, arr)
+ }
+ if c.FunctionKey != "" {
+ arr.AppendString(ent.Caller.Function)
+ }
+ }
+ for i := range arr.elems {
+ if i > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+ fmt.Fprint(line, arr.elems[i])
+ }
+ putSliceEncoder(arr)
+
+ // Add the message itself.
+ if c.MessageKey != "" {
+ c.addSeparatorIfNecessary(line)
+ line.AppendString(ent.Message)
+ }
+
+ // Add any structured context.
+ c.writeContext(line, fields)
+
+ // If there's no stacktrace key, honor that; this allows users to force
+ // single-line output.
+ if ent.Stack != "" && c.StacktraceKey != "" {
+ line.AppendByte('\n')
+ line.AppendString(ent.Stack)
+ }
+
+ line.AppendString(c.LineEnding)
+ return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+ context := c.jsonEncoder.Clone().(*jsonEncoder)
+ defer func() {
+ // putJSONEncoder assumes the buffer is still used, but we write out the buffer so
+ // we can free it.
+ context.buf.Free()
+ putJSONEncoder(context)
+ }()
+
+ addFields(context, extra)
+ context.closeOpenNamespaces()
+ if context.buf.Len() == 0 {
+ return
+ }
+
+ c.addSeparatorIfNecessary(line)
+ line.AppendByte('{')
+ line.Write(context.buf.Bytes())
+ line.AppendByte('}')
+}
+
+func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) {
+ if line.Len() > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
new file mode 100644
index 0000000000..9dfd64051f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// Core is a minimal, fast logger interface. It's designed for library authors
+// to wrap in a more user-friendly API.
+type Core interface {
+ LevelEnabler
+
+ // With adds structured context to the Core.
+ With([]Field) Core
+ // Check determines whether the supplied Entry should be logged (using the
+ // embedded LevelEnabler and possibly some extra logic). If the entry
+ // should be logged, the Core adds itself to the CheckedEntry and returns
+ // the result.
+ //
+ // Callers must use Check before calling Write.
+ Check(Entry, *CheckedEntry) *CheckedEntry
+ // Write serializes the Entry and any Fields supplied at the log site and
+ // writes them to their destination.
+ //
+ // If called, Write should always log the Entry and Fields; it should not
+ // replicate the logic of Check.
+ Write(Entry, []Field) error
+ // Sync flushes buffered logs (if any).
+ Sync() error
+}
+
+type nopCore struct{}
+
+// NewNopCore returns a no-op Core.
+func NewNopCore() Core { return nopCore{} }
+func (nopCore) Enabled(Level) bool { return false }
+func (n nopCore) With([]Field) Core { return n }
+func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
+func (nopCore) Write(Entry, []Field) error { return nil }
+func (nopCore) Sync() error { return nil }
+
+// NewCore creates a Core that writes logs to a WriteSyncer.
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
+ return &ioCore{
+ LevelEnabler: enab,
+ enc: enc,
+ out: ws,
+ }
+}
+
+type ioCore struct {
+ LevelEnabler
+ enc Encoder
+ out WriteSyncer
+}
+
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
+func (c *ioCore) With(fields []Field) Core {
+ clone := c.clone()
+ addFields(clone.enc, fields)
+ return clone
+}
+
+func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if c.Enabled(ent.Level) {
+ return ce.AddCore(ent, c)
+ }
+ return ce
+}
+
+func (c *ioCore) Write(ent Entry, fields []Field) error {
+ buf, err := c.enc.EncodeEntry(ent, fields)
+ if err != nil {
+ return err
+ }
+ _, err = c.out.Write(buf.Bytes())
+ buf.Free()
+ if err != nil {
+ return err
+ }
+ if ent.Level > ErrorLevel {
+ // Since we may be crashing the program, sync the output. Ignore Sync
+ // errors, pending a clean solution to issue #370.
+ c.Sync()
+ }
+ return nil
+}
+
+func (c *ioCore) Sync() error {
+ return c.out.Sync()
+}
+
+func (c *ioCore) clone() *ioCore {
+ return &ioCore{
+ LevelEnabler: c.LevelEnabler,
+ enc: c.enc.Clone(),
+ out: c.out,
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
new file mode 100644
index 0000000000..31000e91f7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapcore defines and implements the low-level interfaces upon which
+// zap is built. By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000000..5769ff3e4e
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,451 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+
+ "go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToLowercaseColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.String())
+ }
+ enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToCapitalColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.CapitalString())
+ }
+ enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder,
+// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "capital":
+ *e = CapitalLevelEncoder
+ case "capitalColor":
+ *e = CapitalColorLevelEncoder
+ case "color":
+ *e = LowercaseColorLevelEncoder
+ default:
+ *e = LowercaseLevelEncoder
+ }
+ return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ sec := float64(nanos) / float64(time.Second)
+ enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ millis := float64(nanos) / float64(time.Millisecond)
+ enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+ type appendTimeEncoder interface {
+ AppendTimeLayout(time.Time, string)
+ }
+
+ if enc, ok := enc.(appendTimeEncoder); ok {
+ enc.AppendTimeLayout(t, layout)
+ return
+ }
+
+ enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder which serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+ return func(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, layout, enc)
+ }
+}
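+
+// For illustration, a minimal sketch using a custom layout; the layout string
+// is a placeholder.
+//
+//	cfg := zap.NewProductionEncoderConfig()
+//	cfg.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")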
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosTimeEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "rfc3339nano", "RFC3339Nano":
+ *e = RFC3339NanoTimeEncoder
+ case "rfc3339", "RFC3339":
+ *e = RFC3339TimeEncoder
+ case "iso8601", "ISO8601":
+ *e = ISO8601TimeEncoder
+ case "millis":
+ *e = EpochMillisTimeEncoder
+ case "nanos":
+ *e = EpochNanosTimeEncoder
+ default:
+ *e = EpochTimeEncoder
+ }
+ return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
+// If value is string, it uses UnmarshalText.
+//
+// timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var o struct {
+ Layout string `json:"layout" yaml:"layout"`
+ }
+ if err := unmarshal(&o); err == nil {
+ *e = TimeEncoderOfLayout(o.Layout)
+ return nil
+ }
+
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+ return e.UnmarshalYAML(func(v interface{}) error {
+ return json.Unmarshal(data, v)
+ })
+}
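
As an illustration of the two configuration paths above, a minimal sketch (the layout string is arbitrary; UnmarshalText never returns an error, and unrecognized values fall back to EpochTimeEncoder):

	var enc zapcore.TimeEncoder
	_ = enc.UnmarshalText([]byte("iso8601"))               // selects ISO8601TimeEncoder
	enc = zapcore.TimeEncoderOfLayout("06/01/02 03:04pm")  // custom layout instead
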
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, and anything else is unmarshaled to
+// NanosDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "string":
+ *e = StringDurationEncoder
+ case "nanos":
+ *e = NanosDurationEncoder
+ case "ms":
+ *e = MillisDurationEncoder
+ default:
+ *e = SecondsDurationEncoder
+ }
+ return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullCallerEncoder
+ default:
+ *e = ShortCallerEncoder
+ }
+ return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+ enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullNameEncoder
+ default:
+ *e = FullNameEncoder
+ }
+ return nil
+}
+
+// An EncoderConfig allows users to configure the concrete encoders supplied by
+// zapcore.
+type EncoderConfig struct {
+ // Set the keys used for each log entry. If any key is empty, that portion
+ // of the entry is omitted.
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ // Configure the primitive representations of common complex types. For
+ // example, some users may want all time.Times serialized as floating-point
+ // seconds since epoch, while others may prefer ISO8601 strings.
+ EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
+ EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
+ EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
+ EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
+ // Unlike the other primitive type encoders, EncodeName is optional. The
+ // zero value falls back to FullNameEncoder.
+ EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+	// If not provided, objects are encoded using json.Encoder.
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
+ // Configures the field separator used by the console encoder. Defaults
+ // to tab.
+ ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
+}
+
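
A sketch of a typical EncoderConfig wired up with encoders from this package; the key names are arbitrary, and LowercaseLevelEncoder is assumed from the LevelEncoder section earlier in this file:

	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		NameKey:        "logger",
		CallerKey:      "caller",
		StacktraceKey:  "stacktrace",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.LowercaseLevelEncoder,
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.SecondsDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}
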
+// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
+// map- or struct-like object to the logging context. Like maps, ObjectEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ObjectEncoder interface {
+ // Logging-specific marshalers.
+ AddArray(key string, marshaler ArrayMarshaler) error
+ AddObject(key string, marshaler ObjectMarshaler) error
+
+ // Built-in types.
+ AddBinary(key string, value []byte) // for arbitrary bytes
+ AddByteString(key string, value []byte) // for UTF-8 encoded bytes
+ AddBool(key string, value bool)
+ AddComplex128(key string, value complex128)
+ AddComplex64(key string, value complex64)
+ AddDuration(key string, value time.Duration)
+ AddFloat64(key string, value float64)
+ AddFloat32(key string, value float32)
+ AddInt(key string, value int)
+ AddInt64(key string, value int64)
+ AddInt32(key string, value int32)
+ AddInt16(key string, value int16)
+ AddInt8(key string, value int8)
+ AddString(key, value string)
+ AddTime(key string, value time.Time)
+ AddUint(key string, value uint)
+ AddUint64(key string, value uint64)
+ AddUint32(key string, value uint32)
+ AddUint16(key string, value uint16)
+ AddUint8(key string, value uint8)
+ AddUintptr(key string, value uintptr)
+
+ // AddReflected uses reflection to serialize arbitrary objects, so it can be
+ // slow and allocation-heavy.
+ AddReflected(key string, value interface{}) error
+ // OpenNamespace opens an isolated namespace where all subsequent fields will
+ // be added. Applications can use namespaces to prevent key collisions when
+ // injecting loggers into sub-components or third-party libraries.
+ OpenNamespace(key string)
+}
+
+// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
+// array-like objects to the logging context. Of note, it supports mixed-type
+// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ArrayEncoder interface {
+ // Built-in types.
+ PrimitiveArrayEncoder
+
+ // Time-related types.
+ AppendDuration(time.Duration)
+ AppendTime(time.Time)
+
+ // Logging-specific marshalers.
+ AppendArray(ArrayMarshaler) error
+ AppendObject(ObjectMarshaler) error
+
+ // AppendReflected uses reflection to serialize arbitrary objects, so it's
+ // slow and allocation-heavy.
+ AppendReflected(value interface{}) error
+}
+
+// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
+// only in Go's built-in types. It's included only so that Duration- and
+// TimeEncoders cannot trigger infinite recursion.
+type PrimitiveArrayEncoder interface {
+ // Built-in types.
+ AppendBool(bool)
+ AppendByteString([]byte) // for UTF-8 encoded bytes
+ AppendComplex128(complex128)
+ AppendComplex64(complex64)
+ AppendFloat64(float64)
+ AppendFloat32(float32)
+ AppendInt(int)
+ AppendInt64(int64)
+ AppendInt32(int32)
+ AppendInt16(int16)
+ AppendInt8(int8)
+ AppendString(string)
+ AppendUint(uint)
+ AppendUint64(uint64)
+ AppendUint32(uint32)
+ AppendUint16(uint16)
+ AppendUint8(uint8)
+ AppendUintptr(uintptr)
+}
+
+// Encoder is a format-agnostic interface for all log entry marshalers. Since
+// log encoders don't need to support the same wide range of use cases as
+// general-purpose marshalers, it's possible to make them faster and
+// lower-allocation.
+//
+// Implementations of the ObjectEncoder interface's methods can, of course,
+// freely modify the receiver. However, the Clone and EncodeEntry methods will
+// be called concurrently and shouldn't modify the receiver.
+type Encoder interface {
+ ObjectEncoder
+
+ // Clone copies the encoder, ensuring that adding fields to the copy doesn't
+ // affect the original.
+ Clone() Encoder
+
+ // EncodeEntry encodes an entry and fields, along with any accumulated
+ // context, into a byte buffer and returns it. Any fields that are empty,
+ // including fields on the `Entry` type, should be omitted.
+ EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
new file mode 100644
index 0000000000..9d326e95ea
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -0,0 +1,300 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/exit"
+)
+
+var (
+ _cePool = sync.Pool{New: func() interface{} {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+ }}
+)
+
+func getCheckedEntry() *CheckedEntry {
+ ce := _cePool.Get().(*CheckedEntry)
+ ce.reset()
+ return ce
+}
+
+func putCheckedEntry(ce *CheckedEntry) {
+ if ce == nil {
+ return
+ }
+ _cePool.Put(ce)
+}
+
+// NewEntryCaller makes an EntryCaller from the return signature of
+// runtime.Caller.
+func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
+ if !ok {
+ return EntryCaller{}
+ }
+ return EntryCaller{
+ PC: pc,
+ File: file,
+ Line: line,
+ Defined: true,
+ }
+}
+
+// EntryCaller represents the caller of a logging function.
+type EntryCaller struct {
+ Defined bool
+ PC uintptr
+ File string
+ Line int
+ Function string
+}
+
+// String returns the full path and line number of the caller.
+func (ec EntryCaller) String() string {
+ return ec.FullPath()
+}
+
+// FullPath returns a /full/path/to/package/file:line description of the
+// caller.
+func (ec EntryCaller) FullPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ buf := bufferpool.Get()
+ buf.AppendString(ec.File)
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// TrimmedPath returns a package/file:line description of the caller,
+// preserving only the leaf directory name and file name.
+func (ec EntryCaller) TrimmedPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+	// for discussion of the issue on the Go side.
+ //
+ // Find the last separator.
+ //
+ idx := strings.LastIndexByte(ec.File, '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(ec.File[:idx], '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ buf := bufferpool.Get()
+ // Keep everything after the penultimate separator.
+ buf.AppendString(ec.File[idx+1:])
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
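
For example, a hypothetical caller at /home/user/project/pkg/server/http.go:42 renders as the full path from FullPath, and as server/http.go:42 from TrimmedPath.
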
+
+// An Entry represents a complete log message. The entry's structured context
+// is already serialized, but the log level, time, message, and call site
+// information are available for inspection and modification. Any fields left
+// empty will be omitted when encoding.
+//
+// Entries are pooled, so any functions that accept them MUST be careful not to
+// retain references to them.
+type Entry struct {
+ Level Level
+ Time time.Time
+ LoggerName string
+ Message string
+ Caller EntryCaller
+ Stack string
+}
+
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
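
A minimal sketch of a custom CheckWriteHook; the counter is a hypothetical stand-in for whatever instrumentation is actually in use:

	import (
		"sync/atomic"

		"go.uber.org/zap/zapcore"
	)

	type errorCounterHook struct{ n *int64 }

	func (h errorCounterHook) OnWrite(ce *zapcore.CheckedEntry, _ []zapcore.Field) {
		// CheckedEntry embeds Entry, so the entry's Level is available directly.
		if ce.Level >= zapcore.ErrorLevel {
			atomic.AddInt64(h.n, 1)
		}
	}
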
+
+// CheckWriteAction indicates what action to take after a log entry is
+// processed. Actions are ordered in increasing severity.
+type CheckWriteAction uint8
+
+const (
+ // WriteThenNoop indicates that nothing special needs to be done. It's the
+ // default behavior.
+ WriteThenNoop CheckWriteAction = iota
+ // WriteThenGoexit runs runtime.Goexit after Write.
+ WriteThenGoexit
+ // WriteThenPanic causes a panic after Write.
+ WriteThenPanic
+ // WriteThenFatal causes an os.Exit(1) after Write.
+ WriteThenFatal
+)
+
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
+// CheckedEntry is an Entry together with a collection of Cores that have
+// already agreed to log it.
+//
+// CheckedEntry references should be created by calling AddCore or After on a
+// nil *CheckedEntry. References are returned to a pool after Write, and MUST
+// NOT be retained after calling their Write method.
+type CheckedEntry struct {
+ Entry
+ ErrorOutput WriteSyncer
+ dirty bool // best-effort detection of pool misuse
+ after CheckWriteHook
+ cores []Core
+}
+
+func (ce *CheckedEntry) reset() {
+ ce.Entry = Entry{}
+ ce.ErrorOutput = nil
+ ce.dirty = false
+ ce.after = nil
+ for i := range ce.cores {
+ // don't keep references to cores
+ ce.cores[i] = nil
+ }
+ ce.cores = ce.cores[:0]
+}
+
+// Write writes the entry to the stored Cores, reports any write errors to the
+// configured ErrorOutput, and returns the CheckedEntry reference to a pool for
+// immediate re-use. Finally, it invokes any registered CheckWriteHook.
+func (ce *CheckedEntry) Write(fields ...Field) {
+ if ce == nil {
+ return
+ }
+
+ if ce.dirty {
+ if ce.ErrorOutput != nil {
+ // Make a best effort to detect unsafe re-use of this CheckedEntry.
+ // If the entry is dirty, log an internal error; because the
+ // CheckedEntry is being used after it was returned to the pool,
+ // the message may be an amalgamation from multiple call sites.
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
+ ce.ErrorOutput.Sync()
+ }
+ return
+ }
+ ce.dirty = true
+
+ var err error
+ for i := range ce.cores {
+ err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
+ }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ ce.ErrorOutput.Sync()
+ }
+
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
+ }
+ putCheckedEntry(ce)
+}
+
+// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
+// used by Core.Check implementations, and is safe to call on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.cores = append(ce.cores, core)
+ return ce
+}
+
+// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
+// Core will panic or fatal after writing this log entry. Like AddCore, it's
+// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckedEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.after = hook
+ return ce
+}
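
For reference, the AddCore pattern as it would appear in a hypothetical custom Core's Check method (myCore stands in for a type that implements the rest of the Core interface), mirroring the cores added elsewhere in this change:

	func (c *myCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
		if c.Enabled(ent.Level) {
			// Register this core; AddCore handles a nil ce by fetching one from the pool.
			return ce.AddCore(ent, c)
		}
		return ce
	}
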
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 0000000000..06359907af
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the Error() method
+ defer func() {
+ if rerr := recover(); rerr != nil {
+			// If it's a nil pointer, just say "". The likeliest causes are an
+ // error that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "" is a nice result.
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", rerr)
+ }
+ }()
+
+ basic := err.Error()
+ enc.AddString(key, basic)
+
+ switch e := err.(type) {
+ case errorGroup:
+ return enc.AddArray(key+"Causes", errArray(e.Errors()))
+ case fmt.Formatter:
+ verbose := fmt.Sprintf("%+v", e)
+ if verbose != basic {
+ // This is a rich error type, like those produced by
+ // github.com/pkg/errors.
+ enc.AddString(key+"Verbose", verbose)
+ }
+ }
+ return nil
+}
+
+type errorGroup interface {
+ // Provides read-only access to the underlying list of errors, preferably
+ // without causing any allocs.
+ Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+
+ el := newErrArrayElem(errs[i])
+ arr.AppendObject(el)
+ el.Free()
+ }
+ return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} object, re-using the same error-encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+ e := _errArrayElemPool.Get().(*errArrayElem)
+ e.err = err
+ return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+ return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+ return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+ e.err = nil
+ _errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000000..95bdb0a126
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+ // UnknownType is the default field type. Attempting to add it to an encoder will panic.
+ UnknownType FieldType = iota
+ // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+ ArrayMarshalerType
+ // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+ ObjectMarshalerType
+ // BinaryType indicates that the field carries an opaque binary blob.
+ BinaryType
+ // BoolType indicates that the field carries a bool.
+ BoolType
+ // ByteStringType indicates that the field carries UTF-8 encoded bytes.
+ ByteStringType
+ // Complex128Type indicates that the field carries a complex128.
+ Complex128Type
+	// Complex64Type indicates that the field carries a complex64.
+ Complex64Type
+ // DurationType indicates that the field carries a time.Duration.
+ DurationType
+ // Float64Type indicates that the field carries a float64.
+ Float64Type
+ // Float32Type indicates that the field carries a float32.
+ Float32Type
+ // Int64Type indicates that the field carries an int64.
+ Int64Type
+ // Int32Type indicates that the field carries an int32.
+ Int32Type
+ // Int16Type indicates that the field carries an int16.
+ Int16Type
+ // Int8Type indicates that the field carries an int8.
+ Int8Type
+ // StringType indicates that the field carries a string.
+ StringType
+ // TimeType indicates that the field carries a time.Time that is
+ // representable by a UnixNano() stored as an int64.
+ TimeType
+ // TimeFullType indicates that the field carries a time.Time stored as-is.
+ TimeFullType
+ // Uint64Type indicates that the field carries a uint64.
+ Uint64Type
+ // Uint32Type indicates that the field carries a uint32.
+ Uint32Type
+ // Uint16Type indicates that the field carries a uint16.
+ Uint16Type
+ // Uint8Type indicates that the field carries a uint8.
+ Uint8Type
+ // UintptrType indicates that the field carries a uintptr.
+ UintptrType
+ // ReflectType indicates that the field carries an interface{}, which should
+ // be serialized using reflection.
+ ReflectType
+ // NamespaceType signals the beginning of an isolated namespace. All
+ // subsequent fields should be added to the new namespace.
+ NamespaceType
+ // StringerType indicates that the field carries a fmt.Stringer.
+ StringerType
+ // ErrorType indicates that the field carries an error.
+ ErrorType
+ // SkipType indicates that the field is a no-op.
+ SkipType
+
+ // InlineMarshalerType indicates that the field carries an ObjectMarshaler
+ // that should be inlined.
+ InlineMarshalerType
+)
+
+// A Field is a marshaling operation used to add a key-value pair to a logger's
+// context. Most fields are lazily marshaled, so it's inexpensive to add fields
+// to disabled debug-level log statements.
+type Field struct {
+ Key string
+ Type FieldType
+ Integer int64
+ String string
+ Interface interface{}
+}
+
+// AddTo exports a field through the ObjectEncoder interface. It's primarily
+// useful to library authors, and shouldn't be necessary in most applications.
+func (f Field) AddTo(enc ObjectEncoder) {
+ var err error
+
+ switch f.Type {
+ case ArrayMarshalerType:
+ err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
+ case ObjectMarshalerType:
+ err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
+ case InlineMarshalerType:
+ err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc)
+ case BinaryType:
+ enc.AddBinary(f.Key, f.Interface.([]byte))
+ case BoolType:
+ enc.AddBool(f.Key, f.Integer == 1)
+ case ByteStringType:
+ enc.AddByteString(f.Key, f.Interface.([]byte))
+ case Complex128Type:
+ enc.AddComplex128(f.Key, f.Interface.(complex128))
+ case Complex64Type:
+ enc.AddComplex64(f.Key, f.Interface.(complex64))
+ case DurationType:
+ enc.AddDuration(f.Key, time.Duration(f.Integer))
+ case Float64Type:
+ enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
+ case Float32Type:
+ enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
+ case Int64Type:
+ enc.AddInt64(f.Key, f.Integer)
+ case Int32Type:
+ enc.AddInt32(f.Key, int32(f.Integer))
+ case Int16Type:
+ enc.AddInt16(f.Key, int16(f.Integer))
+ case Int8Type:
+ enc.AddInt8(f.Key, int8(f.Integer))
+ case StringType:
+ enc.AddString(f.Key, f.String)
+ case TimeType:
+ if f.Interface != nil {
+ enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
+ } else {
+ // Fall back to UTC if location is nil.
+ enc.AddTime(f.Key, time.Unix(0, f.Integer))
+ }
+ case TimeFullType:
+ enc.AddTime(f.Key, f.Interface.(time.Time))
+ case Uint64Type:
+ enc.AddUint64(f.Key, uint64(f.Integer))
+ case Uint32Type:
+ enc.AddUint32(f.Key, uint32(f.Integer))
+ case Uint16Type:
+ enc.AddUint16(f.Key, uint16(f.Integer))
+ case Uint8Type:
+ enc.AddUint8(f.Key, uint8(f.Integer))
+ case UintptrType:
+ enc.AddUintptr(f.Key, uintptr(f.Integer))
+ case ReflectType:
+ err = enc.AddReflected(f.Key, f.Interface)
+ case NamespaceType:
+ enc.OpenNamespace(f.Key)
+ case StringerType:
+ err = encodeStringer(f.Key, f.Interface, enc)
+ case ErrorType:
+ err = encodeError(f.Key, f.Interface.(error), enc)
+ case SkipType:
+ break
+ default:
+ panic(fmt.Sprintf("unknown field type: %v", f))
+ }
+
+ if err != nil {
+ enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+ }
+}
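
For illustration, a Field can also be built by hand and exported through any ObjectEncoder (enc below is hypothetical); applications normally use the constructors in the parent zap package, e.g. zap.String:

	f := zapcore.Field{Key: "user", Type: zapcore.StringType, String: "alice"}
	f.AddTo(enc) // on a JSON encoder this emits "user":"alice"
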
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+ if f.Type != other.Type {
+ return false
+ }
+ if f.Key != other.Key {
+ return false
+ }
+
+ switch f.Type {
+ case BinaryType, ByteStringType:
+ return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+ case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+ return reflect.DeepEqual(f.Interface, other.Interface)
+ default:
+ return f == other
+ }
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+ for i := range fields {
+ fields[i].AddTo(enc)
+ }
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the String() method, similar to https://golang.org/src/fmt/print.go#L540
+ defer func() {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "" is a nice result.
+ if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", err)
+ }
+ }()
+
+ enc.AddString(key, stringer.(fmt.Stringer).String())
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000000..198def9917
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+ Core
+ funcs []func(Entry) error
+}
+
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+ funcs := append([]func(Entry) error{}, hooks...)
+ return &hooked{
+ Core: core,
+ funcs: funcs,
+ }
+}
+
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ // Let the wrapped Core decide whether to log this message or not. This
+ // also gives the downstream a chance to register itself directly with the
+ // CheckedEntry.
+ if downstream := h.Core.Check(ent, ce); downstream != nil {
+ return downstream.AddCore(ent, h)
+ }
+ return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+ return &hooked{
+ Core: h.Core.With(fields),
+ funcs: h.funcs,
+ }
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+ // Since our downstream had a chance to register itself directly with the
+	// CheckedEntry, we don't need to call it here.
+ var err error
+ for i := range h.funcs {
+ err = multierr.Append(err, h.funcs[i](ent))
+ }
+ return err
+}
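
A small sketch of wrapping an existing Core (hypothetical core variable) with RegisterHooks; the counter is illustrative only and uses sync/atomic because hooks may run from many goroutines:

	var written int64
	core = zapcore.RegisterHooks(core, func(ent zapcore.Entry) error {
		atomic.AddInt64(&written, 1)
		return nil
	})
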
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000000..7a11237ae9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,75 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+ core Core
+ level LevelEnabler
+}
+
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core. It cannot be used to decrease the logging level, as it acts
+// as a filter before calling the underlying core. If level decreases the log level,
+// an error is returned.
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) {
+ for l := _maxLevel; l >= _minLevel; l-- {
+ if !core.Enabled(l) && level.Enabled(l) {
+ return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l)
+ }
+ }
+
+ return &levelFilterCore{core, level}, nil
+}
+
+func (c *levelFilterCore) Enabled(lvl Level) bool {
+ return c.level.Enabled(lvl)
+}
+
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
+func (c *levelFilterCore) With(fields []Field) Core {
+ return &levelFilterCore{c.core.With(fields), c.level}
+}
+
+func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !c.Enabled(ent.Level) {
+ return ce
+ }
+
+ return c.core.Check(ent, ce)
+}
+
+func (c *levelFilterCore) Write(ent Entry, fields []Field) error {
+ return c.core.Write(ent, fields)
+}
+
+func (c *levelFilterCore) Sync() error {
+ return c.core.Sync()
+}
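
For example, a debug-level core (hypothetical debugCore) can be tightened to warn-and-above; a plain Level value satisfies LevelEnabler:

	warnCore, err := zapcore.NewIncreaseLevelCore(debugCore, zapcore.WarnLevel)
	if err != nil {
		// Returned when the requested level would loosen, not tighten, the core.
		panic(err)
	}
	_ = warnCore
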
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 0000000000..3921c5cd33
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,562 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/base64"
+ "math"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+// For JSON-escaping; see jsonEncoder.safeAddString below.
+const _hex = "0123456789abcdef"
+
+var _jsonPool = sync.Pool{New: func() interface{} {
+ return &jsonEncoder{}
+}}
+
+func getJSONEncoder() *jsonEncoder {
+ return _jsonPool.Get().(*jsonEncoder)
+}
+
+func putJSONEncoder(enc *jsonEncoder) {
+ if enc.reflectBuf != nil {
+ enc.reflectBuf.Free()
+ }
+ enc.EncoderConfig = nil
+ enc.buf = nil
+ enc.spaced = false
+ enc.openNamespaces = 0
+ enc.reflectBuf = nil
+ enc.reflectEnc = nil
+ _jsonPool.Put(enc)
+}
+
+type jsonEncoder struct {
+ *EncoderConfig
+ buf *buffer.Buffer
+ spaced bool // include spaces after colons and commas
+ openNamespaces int
+
+ // for encoding generic values by reflection
+ reflectBuf *buffer.Buffer
+ reflectEnc ReflectedEncoder
+}
+
+// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
+// appropriately escapes all field keys and values.
+//
+// Note that the encoder doesn't deduplicate keys, so it's possible to produce
+// a message like
+//
+// {"foo":"bar","foo":"baz"}
+//
+// This is permitted by the JSON specification, but not encouraged. Many
+// libraries will ignore duplicate key-value pairs (typically keeping the last
+// pair) when unmarshaling, but users should attempt to avoid adding duplicate
+// keys.
+func NewJSONEncoder(cfg EncoderConfig) Encoder {
+ return newJSONEncoder(cfg, false)
+}
+
+func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+	// If no EncoderConfig.NewReflectedEncoder is provided by the user, use the default.
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
+ return &jsonEncoder{
+ EncoderConfig: &cfg,
+ buf: bufferpool.Get(),
+ spaced: spaced,
+ }
+}
+
+func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendArray(arr)
+}
+
+func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendObject(obj)
+}
+
+func (enc *jsonEncoder) AddBinary(key string, val []byte) {
+ enc.AddString(key, base64.StdEncoding.EncodeToString(val))
+}
+
+func (enc *jsonEncoder) AddByteString(key string, val []byte) {
+ enc.addKey(key)
+ enc.AppendByteString(val)
+}
+
+func (enc *jsonEncoder) AddBool(key string, val bool) {
+ enc.addKey(key)
+ enc.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
+ enc.addKey(key)
+ enc.AppendComplex128(val)
+}
+
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
+func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
+ enc.addKey(key)
+ enc.AppendDuration(val)
+}
+
+func (enc *jsonEncoder) AddFloat64(key string, val float64) {
+ enc.addKey(key)
+ enc.AppendFloat64(val)
+}
+
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
+func (enc *jsonEncoder) AddInt64(key string, val int64) {
+ enc.addKey(key)
+ enc.AppendInt64(val)
+}
+
+func (enc *jsonEncoder) resetReflectBuf() {
+ if enc.reflectBuf == nil {
+ enc.reflectBuf = bufferpool.Get()
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
+ } else {
+ enc.reflectBuf.Reset()
+ }
+}
+
+var nullLiteralBytes = []byte("null")
+
+// Only invoke the standard JSON encoder if there is actually something to
+// encode; otherwise write the JSON null literal directly.
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
+ if obj == nil {
+ return nullLiteralBytes, nil
+ }
+ enc.resetReflectBuf()
+ if err := enc.reflectEnc.Encode(obj); err != nil {
+ return nil, err
+ }
+ enc.reflectBuf.TrimNewline()
+ return enc.reflectBuf.Bytes(), nil
+}
+
+func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
+ valueBytes, err := enc.encodeReflected(obj)
+ if err != nil {
+ return err
+ }
+ enc.addKey(key)
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) OpenNamespace(key string) {
+ enc.addKey(key)
+ enc.buf.AppendByte('{')
+ enc.openNamespaces++
+}
+
+func (enc *jsonEncoder) AddString(key, val string) {
+ enc.addKey(key)
+ enc.AppendString(val)
+}
+
+func (enc *jsonEncoder) AddTime(key string, val time.Time) {
+ enc.addKey(key)
+ enc.AppendTime(val)
+}
+
+func (enc *jsonEncoder) AddUint64(key string, val uint64) {
+ enc.addKey(key)
+ enc.AppendUint64(val)
+}
+
+func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('[')
+ err := arr.MarshalLogArray(enc)
+ enc.buf.AppendByte(']')
+ return err
+}
+
+func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
+ enc.addElementSeparator()
+ enc.buf.AppendByte('{')
+ err := obj.MarshalLogObject(enc)
+ enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
+ return err
+}
+
+func (enc *jsonEncoder) AppendBool(val bool) {
+ enc.addElementSeparator()
+ enc.buf.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AppendByteString(val []byte) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddByteString(val)
+ enc.buf.AppendByte('"')
+}
+
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
+ enc.addElementSeparator()
+ // Cast to a platform-independent, fixed-size type.
+ r, i := float64(real(val)), float64(imag(val))
+ enc.buf.AppendByte('"')
+ // Because we're always in a quoted string, we can use strconv without
+ // special-casing NaN and +/-Inf.
+ enc.buf.AppendFloat(r, precision)
+	// If the imaginary part is less than 0, a minus (-) sign is added
+	// automatically by AppendFloat.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
+ enc.buf.AppendByte('i')
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendDuration(val time.Duration) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeDuration; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
+ // JSON valid.
+ enc.AppendInt64(int64(val))
+ }
+}
+
+func (enc *jsonEncoder) AppendInt64(val int64) {
+ enc.addElementSeparator()
+ enc.buf.AppendInt(val)
+}
+
+func (enc *jsonEncoder) AppendReflected(val interface{}) error {
+ valueBytes, err := enc.encodeReflected(val)
+ if err != nil {
+ return err
+ }
+ enc.addElementSeparator()
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) AppendString(val string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.buf.AppendTime(time, layout)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTime(val time.Time) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeTime; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
+ // output JSON valid.
+ enc.AppendInt64(val.UnixNano())
+ }
+}
+
+func (enc *jsonEncoder) AppendUint64(val uint64) {
+ enc.addElementSeparator()
+ enc.buf.AppendUint(val)
+}
+
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+
+func (enc *jsonEncoder) Clone() Encoder {
+ clone := enc.clone()
+ clone.buf.Write(enc.buf.Bytes())
+ return clone
+}
+
+func (enc *jsonEncoder) clone() *jsonEncoder {
+ clone := getJSONEncoder()
+ clone.EncoderConfig = enc.EncoderConfig
+ clone.spaced = enc.spaced
+ clone.openNamespaces = enc.openNamespaces
+ clone.buf = bufferpool.Get()
+ return clone
+}
+
+func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ final := enc.clone()
+ final.buf.AppendByte('{')
+
+ if final.LevelKey != "" && final.EncodeLevel != nil {
+ final.addKey(final.LevelKey)
+ cur := final.buf.Len()
+ final.EncodeLevel(ent.Level, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
+ // output JSON valid.
+ final.AppendString(ent.Level.String())
+ }
+ }
+ if final.TimeKey != "" {
+ final.AddTime(final.TimeKey, ent.Time)
+ }
+ if ent.LoggerName != "" && final.NameKey != "" {
+ final.addKey(final.NameKey)
+ cur := final.buf.Len()
+ nameEncoder := final.EncodeName
+
+ // if no name encoder provided, fall back to FullNameEncoder for backwards
+ // compatibility
+ if nameEncoder == nil {
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeName was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.LoggerName)
+ }
+ }
+ if ent.Caller.Defined {
+ if final.CallerKey != "" {
+ final.addKey(final.CallerKey)
+ cur := final.buf.Len()
+ final.EncodeCaller(ent.Caller, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeCaller was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.Caller.String())
+ }
+ }
+ if final.FunctionKey != "" {
+ final.addKey(final.FunctionKey)
+ final.AppendString(ent.Caller.Function)
+ }
+ }
+ if final.MessageKey != "" {
+ final.addKey(enc.MessageKey)
+ final.AppendString(ent.Message)
+ }
+ if enc.buf.Len() > 0 {
+ final.addElementSeparator()
+ final.buf.Write(enc.buf.Bytes())
+ }
+ addFields(final, fields)
+ final.closeOpenNamespaces()
+ if ent.Stack != "" && final.StacktraceKey != "" {
+ final.AddString(final.StacktraceKey, ent.Stack)
+ }
+ final.buf.AppendByte('}')
+ final.buf.AppendString(final.LineEnding)
+
+ ret := final.buf
+ putJSONEncoder(final)
+ return ret, nil
+}
+
+func (enc *jsonEncoder) truncate() {
+ enc.buf.Reset()
+}
+
+func (enc *jsonEncoder) closeOpenNamespaces() {
+ for i := 0; i < enc.openNamespaces; i++ {
+ enc.buf.AppendByte('}')
+ }
+ enc.openNamespaces = 0
+}
+
+func (enc *jsonEncoder) addKey(key string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(key)
+ enc.buf.AppendByte('"')
+ enc.buf.AppendByte(':')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+}
+
+func (enc *jsonEncoder) addElementSeparator() {
+ last := enc.buf.Len() - 1
+ if last < 0 {
+ return
+ }
+ switch enc.buf.Bytes()[last] {
+ case '{', '[', ':', ',', ' ':
+ return
+ default:
+ enc.buf.AppendByte(',')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+ }
+}
+
+func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
+ enc.addElementSeparator()
+ switch {
+ case math.IsNaN(val):
+ enc.buf.AppendString(`"NaN"`)
+ case math.IsInf(val, 1):
+ enc.buf.AppendString(`"+Inf"`)
+ case math.IsInf(val, -1):
+ enc.buf.AppendString(`"-Inf"`)
+ default:
+ enc.buf.AppendFloat(val, bitSize)
+ }
+}
+
+// safeAddString JSON-escapes a string and appends it to the internal buffer.
+// Unlike the standard library's encoder, it doesn't attempt to protect the
+// user from browser vulnerabilities or JSONP-related problems.
+func (enc *jsonEncoder) safeAddString(s string) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.AppendString(s[i : i+size])
+ i += size
+ }
+}
+
+// safeAddByteString is the no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRune(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.Write(s[i : i+size])
+ i += size
+ }
+}
+
+// tryAddRuneSelf appends b if it is a valid UTF-8 character represented in a single byte.
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
+ if b >= utf8.RuneSelf {
+ return false
+ }
+ if 0x20 <= b && b != '\\' && b != '"' {
+ enc.buf.AppendByte(b)
+ return true
+ }
+ switch b {
+ case '\\', '"':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte(b)
+ case '\n':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('n')
+ case '\r':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('r')
+ case '\t':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ enc.buf.AppendString(`\u00`)
+ enc.buf.AppendByte(_hex[b>>4])
+ enc.buf.AppendByte(_hex[b&0xF])
+ }
+ return true
+}
+
+func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
+ if r == utf8.RuneError && size == 1 {
+ enc.buf.AppendString(`\ufffd`)
+ return true
+ }
+ return false
+}
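
Putting the encoder together, a sketch of encoding one entry directly (cfg is an EncoderConfig like the one sketched earlier; the exact output depends on the configured encoders):

	enc := zapcore.NewJSONEncoder(cfg)
	buf, err := enc.EncodeEntry(
		zapcore.Entry{Level: zapcore.InfoLevel, Time: time.Now(), Message: "request served"},
		[]zapcore.Field{{Key: "status", Type: zapcore.Int64Type, Integer: 200}},
	)
	if err == nil {
		// Roughly {"level":"info","ts":"...","msg":"request served","status":200}
		fmt.Print(buf.String())
		buf.Free()
	}
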
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000000..e01a241316
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,229 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
+)
+
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
+// String returns a lower-case ASCII representation of the log level.
+func (l Level) String() string {
+ switch l {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warn"
+ case ErrorLevel:
+ return "error"
+ case DPanicLevel:
+ return "dpanic"
+ case PanicLevel:
+ return "panic"
+ case FatalLevel:
+ return "fatal"
+ default:
+ return fmt.Sprintf("Level(%d)", l)
+ }
+}
+
+// CapitalString returns an all-caps ASCII representation of the log level.
+func (l Level) CapitalString() string {
+ // Printing levels in all-caps is common enough that we should export this
+ // functionality.
+ switch l {
+ case DebugLevel:
+ return "DEBUG"
+ case InfoLevel:
+ return "INFO"
+ case WarnLevel:
+ return "WARN"
+ case ErrorLevel:
+ return "ERROR"
+ case DPanicLevel:
+ return "DPANIC"
+ case PanicLevel:
+ return "PANIC"
+ case FatalLevel:
+ return "FATAL"
+ default:
+ return fmt.Sprintf("LEVEL(%d)", l)
+ }
+}
+
+// MarshalText marshals the Level to text. Note that the text representation
+// drops the -Level suffix (see example).
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
+// expects the text representation of a Level to drop the -Level suffix (see
+// example).
+//
+// In particular, this makes it easy to configure logging levels using YAML,
+// TOML, or JSON files.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errUnmarshalNilLevel
+ }
+ if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
+ return fmt.Errorf("unrecognized level: %q", text)
+ }
+ return nil
+}
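A small sketch of the config-file case; the `config` struct and the JSON snippet are illustrative only. `encoding/json` calls UnmarshalText for the quoted string value:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/zap/zapcore"
)

type config struct {
	Level zapcore.Level `json:"level"`
}

func main() {
	var c config
	if err := json.Unmarshal([]byte(`{"level":"warn"}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Level) // warn
}
```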
+
+func (l *Level) unmarshalText(text []byte) bool {
+ switch string(text) {
+ case "debug", "DEBUG":
+ *l = DebugLevel
+ case "info", "INFO", "": // make the zero value useful
+ *l = InfoLevel
+ case "warn", "WARN":
+ *l = WarnLevel
+ case "error", "ERROR":
+ *l = ErrorLevel
+ case "dpanic", "DPANIC":
+ *l = DPanicLevel
+ case "panic", "PANIC":
+ *l = PanicLevel
+ case "fatal", "FATAL":
+ *l = FatalLevel
+ default:
+ return false
+ }
+ return true
+}
+
+// Set sets the level for the flag.Value interface.
+func (l *Level) Set(s string) error {
+ return l.UnmarshalText([]byte(s))
+}
+
+// Get gets the level for the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Enabled returns true if the given level is at or above this level.
+func (l Level) Enabled(lvl Level) bool {
+ return lvl >= l
+}
+
+// LevelEnabler decides whether a given logging level is enabled when logging a
+// message.
+//
+// Enablers are intended to be used to implement deterministic filters;
+// concerns like sampling are better implemented as a Core.
+//
+// Each concrete Level value implements a static LevelEnabler which returns
+// true for itself and all higher logging levels. For example WarnLevel.Enabled()
+// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
+// FatalLevel, but return false for InfoLevel and DebugLevel.
+type LevelEnabler interface {
+ Enabled(Level) bool
+}
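For illustration, each concrete Level acting as its own static LevelEnabler:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// WarnLevel enables itself and everything more severe.
	fmt.Println(zapcore.WarnLevel.Enabled(zapcore.ErrorLevel)) // true
	fmt.Println(zapcore.WarnLevel.Enabled(zapcore.InfoLevel))  // false
}
```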
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
new file mode 100644
index 0000000000..7af8dadcb3
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level_strings.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/zap/internal/color"
+
+var (
+ _levelToColor = map[Level]color.Color{
+ DebugLevel: color.Magenta,
+ InfoLevel: color.Blue,
+ WarnLevel: color.Yellow,
+ ErrorLevel: color.Red,
+ DPanicLevel: color.Red,
+ PanicLevel: color.Red,
+ FatalLevel: color.Red,
+ }
+ _unknownLevelColor = color.Red
+
+ _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
+ _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
+)
+
+func init() {
+ for level, color := range _levelToColor {
+ _levelToLowercaseColorString[level] = color.Add(level.String())
+ _levelToCapitalColorString[level] = color.Add(level.CapitalString())
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
new file mode 100644
index 0000000000..c3c55ba0d9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/marshaler.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// ObjectMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ObjectMarshaler is only used when zap.Object is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ObjectMarshaler interface {
+ MarshalLogObject(ObjectEncoder) error
+}
+
+// ObjectMarshalerFunc is a type adapter that turns a function into an
+// ObjectMarshaler.
+type ObjectMarshalerFunc func(ObjectEncoder) error
+
+// MarshalLogObject calls the underlying function.
+func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
+ return f(enc)
+}
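A sketch of the selective-omission idea described above; the `User` type and its fields are hypothetical:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// User adds itself to the logging context and deliberately omits the password.
type User struct {
	Name     string
	Password string
}

func (u User) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	// Password intentionally not encoded.
	return nil
}

func main() {
	logger := zap.NewExample()
	logger.Info("login", zap.Object("user", User{Name: "alice", Password: "s3cret"}))
}
```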
+
+// ArrayMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ArrayMarshaler is only used when zap.Array is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ArrayMarshaler interface {
+ MarshalLogArray(ArrayEncoder) error
+}
+
+// ArrayMarshalerFunc is a type adapter that turns a function into an
+// ArrayMarshaler.
+type ArrayMarshalerFunc func(ArrayEncoder) error
+
+// MarshalLogArray calls the underlying function.
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
+ return f(enc)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
new file mode 100644
index 0000000000..dfead0829d
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// MapObjectEncoder is an ObjectEncoder backed by a simple
+// map[string]interface{}. It's not fast enough for production use, but it's
+// helpful in tests.
+type MapObjectEncoder struct {
+ // Fields contains the entire encoded log context.
+ Fields map[string]interface{}
+ // cur is a pointer to the namespace we're currently writing to.
+ cur map[string]interface{}
+}
+
+// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
+func NewMapObjectEncoder() *MapObjectEncoder {
+ m := make(map[string]interface{})
+ return &MapObjectEncoder{
+ Fields: m,
+ cur: m,
+ }
+}
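A sketch of the test-oriented use mentioned above, encoding an ad-hoc ObjectMarshalerFunc into a plain map and asserting on the result:

```go
package example_test

import (
	"testing"

	"go.uber.org/zap/zapcore"
)

func TestObjectEncoding(t *testing.T) {
	enc := zapcore.NewMapObjectEncoder()
	obj := zapcore.ObjectMarshalerFunc(func(oe zapcore.ObjectEncoder) error {
		oe.AddString("name", "alice")
		return nil
	})
	if err := obj.MarshalLogObject(enc); err != nil {
		t.Fatal(err)
	}
	if enc.Fields["name"] != "alice" {
		t.Errorf("unexpected fields: %v", enc.Fields)
	}
}
```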
+
+// AddArray implements ObjectEncoder.
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
+ arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
+ err := v.MarshalLogArray(arr)
+ m.cur[key] = arr.elems
+ return err
+}
+
+// AddObject implements ObjectEncoder.
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+ newMap := NewMapObjectEncoder()
+ m.cur[k] = newMap.Fields
+ return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m *MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m *MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+ m.cur[k] = v
+ return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+ ns := make(map[string]interface{})
+ m.cur[k] = ns
+ m.cur = ns
+}
+
+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.
+type sliceArrayEncoder struct {
+ elems []interface{}
+}
+
+func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
+ enc := &sliceArrayEncoder{}
+ err := v.MarshalLogArray(enc)
+ s.elems = append(s.elems, enc.elems)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
+ m := NewMapObjectEncoder()
+ err := v.MarshalLogObject(m)
+ s.elems = append(s.elems, m.Fields)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
+ s.elems = append(s.elems, v)
+ return nil
+}
+
+func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) }
+func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 0000000000..8746360eca
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
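A sketch of wiring a custom encoder through `EncoderConfig.NewReflectedEncoder`; keeping HTML escaping enabled (the opposite of the default above) is just an example choice:

```go
package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		return json.NewEncoder(w) // EscapeHTML stays on for reflected fields
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	// map[string]string has no dedicated field type, so it is encoded reflectively.
	zap.New(core).Info("reflected", zap.Any("payload", map[string]string{"html": "<b>"}))
}
```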
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
new file mode 100644
index 0000000000..dc518055a4
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -0,0 +1,230 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+const (
+ _numLevels = _maxLevel - _minLevel + 1
+ _countersPerLevel = 4096
+)
+
+type counter struct {
+ resetAt atomic.Int64
+ counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+ return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+ i := lvl - _minLevel
+ j := fnv32a(key) % _countersPerLevel
+ return &cs[i][j]
+}
+
+// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc
+func fnv32a(s string) uint32 {
+ const (
+ offset32 = 2166136261
+ prime32 = 16777619
+ )
+ hash := uint32(offset32)
+ for i := 0; i < len(s); i++ {
+ hash ^= uint32(s[i])
+ hash *= prime32
+ }
+ return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+ tn := t.UnixNano()
+ resetAfter := c.resetAt.Load()
+ if resetAfter > tn {
+ return c.counter.Inc()
+ }
+
+ c.counter.Store(1)
+
+ newResetAfter := tn + tick.Nanoseconds()
+ if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ // We raced with another goroutine trying to reset, and it also reset
+ // the counter to 1, so we need to reincrement the counter.
+ return c.counter.Inc()
+ }
+
+ return 1
+}
+
+// SamplingDecision is a decision made by the sampler, represented as a bit
+// field. More decisions may be added in the future.
+type SamplingDecision uint32
+
+const (
+ // LogDropped indicates that the Sampler dropped a log entry.
+ LogDropped SamplingDecision = 1 << iota
+ // LogSampled indicates that the Sampler sampled a log entry.
+ LogSampled
+)
+
+// optionFunc wraps a func so it satisfies the SamplerOption interface.
+type optionFunc func(*sampler)
+
+func (f optionFunc) apply(s *sampler) {
+ f(s)
+}
+
+// SamplerOption configures a Sampler.
+type SamplerOption interface {
+ apply(*sampler)
+}
+
+// nopSamplingHook is the default hook used by sampler.
+func nopSamplingHook(Entry, SamplingDecision) {}
+
+// SamplerHook registers a function which will be called when Sampler makes a
+// decision.
+//
+// This hook may be used to get visibility into the performance of the sampler.
+// For example, use it to track metrics of dropped versus sampled logs.
+//
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
+func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
+ return optionFunc(func(s *sampler) {
+ s.hook = hook
+ })
+}
+
+// NewSamplerWithOptions creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
+// Sampler can be configured to report sampling decisions with the SamplerHook
+// option.
+//
+// Keep in mind that Zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
+ s := &sampler{
+ Core: core,
+ tick: tick,
+ counts: newCounters(),
+ first: uint64(first),
+ thereafter: uint64(thereafter),
+ hook: nopSamplingHook,
+ }
+ for _, opt := range opts {
+ opt.apply(s)
+ }
+
+ return s
+}
+
+type sampler struct {
+ Core
+
+ counts *counters
+ tick time.Duration
+ first, thereafter uint64
+ hook func(Entry, SamplingDecision)
+}
+
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
+// NewSampler creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+//
+// Deprecated: use NewSamplerWithOptions.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+ return NewSamplerWithOptions(core, tick, first, thereafter)
+}
+
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
+func (s *sampler) With(fields []Field) Core {
+ return &sampler{
+ Core: s.Core.With(fields),
+ tick: s.tick,
+ counts: s.counts,
+ first: s.first,
+ thereafter: s.thereafter,
+ hook: s.hook,
+ }
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !s.Enabled(ent.Level) {
+ return ce
+ }
+
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
+ }
+ return s.Core.Check(ent, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 0000000000..9bb32f0557
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type multiCore []Core
+
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
+// NewTee creates a Core that duplicates log entries into two or more
+// underlying Cores.
+//
+// Calling it with a single Core returns the input unchanged, and calling
+// it with no input returns a no-op Core.
+func NewTee(cores ...Core) Core {
+ switch len(cores) {
+ case 0:
+ return NewNopCore()
+ case 1:
+ return cores[0]
+ default:
+ return multiCore(cores)
+ }
+}
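A sketch of duplicating entries across two cores; the encoders, destinations, and levels here are arbitrary choices:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	consoleCore := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zapcore.DebugLevel,
	)
	jsonCore := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)
	logger := zap.New(zapcore.NewTee(consoleCore, jsonCore))
	defer logger.Sync()
	logger.Info("written to both cores") // the console core also sees debug entries
}
```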
+
+func (mc multiCore) With(fields []Field) Core {
+ clone := make(multiCore, len(mc))
+ for i := range mc {
+ clone[i] = mc[i].With(fields)
+ }
+ return clone
+}
+
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
+func (mc multiCore) Enabled(lvl Level) bool {
+ for i := range mc {
+ if mc[i].Enabled(lvl) {
+ return true
+ }
+ }
+ return false
+}
+
+func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ for i := range mc {
+ ce = mc[i].Check(ent, ce)
+ }
+ return ce
+}
+
+func (mc multiCore) Write(ent Entry, fields []Field) error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Write(ent, fields))
+ }
+ return err
+}
+
+func (mc multiCore) Sync() error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Sync())
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
new file mode 100644
index 0000000000..d4a1af3d07
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "io"
+ "sync"
+
+ "go.uber.org/multierr"
+)
+
+// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
+// that *os.File (and thus, os.Stderr and os.Stdout) implements WriteSyncer.
+type WriteSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
+// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
+// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
+func AddSync(w io.Writer) WriteSyncer {
+ switch w := w.(type) {
+ case WriteSyncer:
+ return w
+ default:
+ return writerWrapper{w}
+ }
+}
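For example, a minimal sketch backing a core with a bytes.Buffer, which gains a no-op Sync through AddSync:

```go
package main

import (
	"bytes"
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var buf bytes.Buffer
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(&buf), // *bytes.Buffer has no Sync method, so it gets the no-op one
		zapcore.InfoLevel,
	)
	zap.New(core).Info("captured in memory")
	fmt.Print(buf.String())
}
```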
+
+type lockedWriteSyncer struct {
+ sync.Mutex
+ ws WriteSyncer
+}
+
+// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
+// particular, *os.Files must be locked before use.
+func Lock(ws WriteSyncer) WriteSyncer {
+ if _, ok := ws.(*lockedWriteSyncer); ok {
+ // no need to layer on another lock
+ return ws
+ }
+ return &lockedWriteSyncer{ws: ws}
+}
+
+func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
+ s.Lock()
+ n, err := s.ws.Write(bs)
+ s.Unlock()
+ return n, err
+}
+
+func (s *lockedWriteSyncer) Sync() error {
+ s.Lock()
+ err := s.ws.Sync()
+ s.Unlock()
+ return err
+}
+
+type writerWrapper struct {
+ io.Writer
+}
+
+func (w writerWrapper) Sync() error {
+ return nil
+}
+
+type multiWriteSyncer []WriteSyncer
+
+// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
+// and sync calls, much like io.MultiWriter.
+func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
+ if len(ws) == 1 {
+ return ws[0]
+ }
+ return multiWriteSyncer(ws)
+}
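A short sketch fanning writes out to stderr and a log file; the file name is illustrative:

```go
package main

import (
	"log"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.Create("app.log")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ws duplicates every Write and Sync to both destinations.
	ws := zapcore.NewMultiWriteSyncer(zapcore.Lock(os.Stderr), zapcore.AddSync(f))
	if _, err := ws.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
	_ = ws.Sync()
}
```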
+
+// See https://golang.org/src/io/multi.go
+// When not all underlying syncers write the same number of bytes,
+// the smallest number is returned even though Write() is called on
+// all of them.
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+ var writeErr error
+ nWritten := 0
+ for _, w := range ws {
+ n, err := w.Write(p)
+ writeErr = multierr.Append(writeErr, err)
+ if nWritten == 0 && n != 0 {
+ nWritten = n
+ } else if n < nWritten {
+ nWritten = n
+ }
+ }
+ return nWritten, writeErr
+}
+
+func (ws multiWriteSyncer) Sync() error {
+ var err error
+ for _, w := range ws {
+ err = multierr.Append(err, w.Sync())
+ }
+ return err
+}
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
index 1473e1296d..781770c204 100644
--- a/vendor/golang.org/x/oauth2/README.md
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
-## Policy for new packages
+## Policy for new endpoints
We no longer accept new provider-specific packages in this repo if all
they do is add a single endpoint variable. If you just want to add a
@@ -29,8 +29,12 @@ package.
## Report Issues / Send Patches
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html. In particular:
+
+* Excluding trivial changes, all contributions should be connected to an existing issue.
+* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
+* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 291df5c833..9085fabe34 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -16,6 +16,7 @@ import (
"net/url"
"strings"
"sync"
+ "time"
"golang.org/x/oauth2/internal"
)
@@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-empty string and validate that it matches the
-// the state query parameter on your redirect callback.
+// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
@@ -290,6 +291,8 @@ type reuseTokenSource struct {
mu sync.Mutex // guards t
t *Token
+
+ expiryDelta time.Duration
}
// Token returns the current token if it's still valid, else will
@@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
if err != nil {
return nil, err
}
+ t.expiryDelta = s.expiryDelta
s.t = t
return t, nil
}
@@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
new: src,
}
}
+
+// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
+// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// configurable. The expiration time of a token is calculated as
+// t.Expiry.Add(-earlyExpiry).
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly, but set the expiryDelta to earlyExpiry,
+ // so the behavior matches what the user expects.
+ rt.expiryDelta = earlyExpiry
+ return rt
+ }
+ src = rt.new
+ }
+ if t != nil {
+ t.expiryDelta = earlyExpiry
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ expiryDelta: earlyExpiry,
+ }
+}
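A sketch of how the new function might be used; the helper and the 30-second buffer are assumptions, not part of this package:

```go
package example

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// newClient refreshes tokens 30s before they would otherwise be considered
// expired, instead of relying on the default 10s buffer.
func newClient(ctx context.Context, conf *oauth2.Config, tok *oauth2.Token) *http.Client {
	ts := oauth2.ReuseTokenSourceWithExpiry(tok, conf.TokenSource(ctx, tok), 30*time.Second)
	return oauth2.NewClient(ctx, ts)
}
```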
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 822720341a..7c64006de6 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -16,10 +16,10 @@ import (
"golang.org/x/oauth2/internal"
)
-// expiryDelta determines how earlier a token should be considered
+// defaultExpiryDelta determines how much earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
+const defaultExpiryDelta = 10 * time.Second
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
@@ -52,6 +52,11 @@ type Token struct {
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
+
+ // expiryDelta is used to calculate when a token is considered
+ // expired, by subtracting from Expiry. If zero, defaultExpiryDelta
+ // is used.
+ expiryDelta time.Duration
}
// Type returns t.TokenType if non-empty, else "Bearer".
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
+
+ expiryDelta := defaultExpiryDelta
+ if t.expiryDelta != 0 {
+ expiryDelta = t.expiryDelta
+ }
return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
diff --git a/vendor/k8s.io/klog/v2/.gitignore b/vendor/k8s.io/klog/v2/.gitignore
new file mode 100644
index 0000000000..0aa2002392
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/.gitignore
@@ -0,0 +1,17 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# OSX trash
+.DS_Store
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
+.idea/
+*.iml
+
+# Vscode files
+.vscode
diff --git a/vendor/k8s.io/klog/v2/CONTRIBUTING.md b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
new file mode 100644
index 0000000000..2641b1f41b
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+## Contact Information
+
+- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/k8s.io/klog/v2/LICENSE b/vendor/k8s.io/klog/v2/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS
new file mode 100644
index 0000000000..a2fe8f351b
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/OWNERS
@@ -0,0 +1,14 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+reviewers:
+ - harshanarayana
+ - pohly
+approvers:
+ - dims
+ - thockin
+ - serathius
+emeritus_approvers:
+ - brancz
+ - justinsb
+ - lavalamp
+ - piosz
+ - tallclair
diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md
new file mode 100644
index 0000000000..d45cbe1720
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/README.md
@@ -0,0 +1,118 @@
+klog
+====
+
+klog is a permanent fork of https://github.com/golang/glog.
+
+## Why was klog created?
+
+The decision to create klog was one that wasn't made lightly, but it was necessary due to some
+drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
+
+> The code in this repo [...] is not itself under development
+
+This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
+
+ * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
+ * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
+ * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
+
+Historical context is available here:
+
+ * https://github.com/kubernetes/kubernetes/issues/61006
+ * https://github.com/kubernetes/kubernetes/issues/70264
+ * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
+ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
+
+## Release versioning
+
+Semantic versioning is used in this repository. It contains several Go modules
+with different levels of stability:
+- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags
+- `examples` - no stable API, no tags, no intention to ever stabilize
+
+Exempt from the API stability guarantee are items (packages, functions, etc.)
+which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those
+may still change in incompatible ways or get removed entirely. This can only
+be used for code that is used in tests to avoid situations where non-test
+code from two different Kubernetes dependencies depends on incompatible
+releases of klog because an experimental API was changed.
+
+----
+
+How to use klog
+===============
+- Replace imports for `"github.com/golang/glog"` with `"k8s.io/klog/v2"`
+- Use `klog.InitFlags(nil)` explicitly to initialize global flags, as we no longer use an `init()` method to register the flags
+- You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
+- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`)
+- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md))
+- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog).
+
+**NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater.
+
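For illustration, a minimal program following the steps above; the verbosity value and the in-memory buffer are only examples:

```go
package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, -log_file, ...
	flag.Set("v", "2")
	flag.Set("logtostderr", "false") // let SetOutput take effect
	flag.Parse()

	var buf bytes.Buffer
	klog.SetOutput(&buf) // redirect everything klog logs, e.g. into memory

	klog.Info("hello from klog")
	klog.Flush()
	fmt.Print(buf.String())
}
```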
+### Coexisting with klog/v2
+
+See [this example](examples/coexist_klog_v1_and_v2/) to see how to coexist with both klog/v1 and klog/v2.
+
+### Coexisting with glog
+This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
+
+## Community, discussion, contribution, and support
+
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack](https://kubernetes.slack.com/messages/klog)
+- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+----
+
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+ https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+ Package glog implements logging analogous to the Google-internal
+ C++ INFO/ERROR/V setup. It provides functions Info, Warning,
+ Error, Fatal, plus formatting variants such as Infof. It
+ also provides V-style logging controlled by the -v and
+ -vmodule=file=2 flags.
+
+ Basic examples:
+
+ glog.Info("Prepare to repel boarders")
+
+ glog.Fatalf("Initialization failed: %s", err)
+
+ See the documentation of the V function for an explanation
+ of these examples:
+
+ if glog.V(2) {
+ glog.Info("Starting transaction...")
+ }
+
+ glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/k8s.io/klog/v2/RELEASE.md b/vendor/k8s.io/klog/v2/RELEASE.md
new file mode 100644
index 0000000000..b53eb960ce
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/RELEASE.md
@@ -0,0 +1,9 @@
+# Release Process
+
+`klog` is released on an as-needed basis. The process is as follows:
+
+1. An issue proposes a new release with a changelog since the last release
+1. All [OWNERS](OWNERS) must LGTM this release
+1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
+1. The release issue is closed
+1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/v2/SECURITY.md b/vendor/k8s.io/klog/v2/SECURITY.md
new file mode 100644
index 0000000000..2083d44cdf
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY.md
@@ -0,0 +1,22 @@
+# Security Policy
+
+## Security Announcements
+
+Join the [kubernetes-security-announce] group for security and vulnerability announcements.
+
+You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
+
+## Reporting a Vulnerability
+
+Instructions for reporting a vulnerability can be found on the
+[Kubernetes Security and Disclosure Information] page.
+
+## Supported Versions
+
+Information about supported Kubernetes versions can be found on the
+[Kubernetes version and version skew support policy] page on the Kubernetes website.
+
+[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce
+[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50
+[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
+[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability
diff --git a/vendor/k8s.io/klog/v2/SECURITY_CONTACTS b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
new file mode 100644
index 0000000000..6128a58699
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/SECURITY_CONTACTS
@@ -0,0 +1,20 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+dims
+thockin
+justinsb
+tallclair
+piosz
+brancz
+DirectXMan12
+lavalamp
diff --git a/vendor/k8s.io/klog/v2/code-of-conduct.md b/vendor/k8s.io/klog/v2/code-of-conduct.md
new file mode 100644
index 0000000000..0d15c00cf3
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go
new file mode 100644
index 0000000000..005513f2a7
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/contextual.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "context"
+
+ "github.com/go-logr/logr"
+)
+
+// This file provides the implementation of
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging
+//
+// SetLogger and ClearLogger were originally added to klog.go and got moved
+// here. Contextual logging adds a way to retrieve a Logger for direct logging
+// without the logging calls in klog.go.
+//
+// The global variables are expected to be modified only during sequential
+// parts of a program (init, serial tests) and therefore are not protected by
+// mutex locking.
+
+var (
+ // klogLogger is used as fallback for logging through the normal klog code
+ // when no Logger is set.
+ klogLogger logr.Logger = logr.New(&klogger{})
+)
+
+// SetLogger sets a Logger implementation that will be used as backing
+// implementation of the traditional klog log calls. klog will do its own
+// verbosity checks before calling logger.V().Info. logger.Error is always
+// called, regardless of the klog verbosity settings.
+//
+// If set, all log lines will be suppressed from the regular output, and
+// redirected to the logr implementation.
+// Use as:
+//
+// ...
+// klog.SetLogger(zapr.NewLogger(zapLog))
+//
+// To remove a backing logr implementation, use ClearLogger. Setting an
+// empty logger with SetLogger(logr.Logger{}) does not work.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func SetLogger(logger logr.Logger) {
+ SetLoggerWithOptions(logger)
+}
+
+// SetLoggerWithOptions is a more flexible version of SetLogger. Without
+// additional options, it behaves exactly like SetLogger. By passing
+// ContextualLogger(true) as option, it can be used to set a logger that then
+// will also get called directly by applications which retrieve it via
+// FromContext, Background, or TODO.
+//
+// Supporting direct calls is recommended because it avoids the overhead of
+// routing log entries through klogr into klog and then into the actual Logger
+// backend.
+func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
+ logging.loggerOptions = loggerOptions{}
+ for _, opt := range opts {
+ opt(&logging.loggerOptions)
+ }
+ logging.logger = &logWriter{
+ Logger: logger,
+ writeKlogBuffer: logging.loggerOptions.writeKlogBuffer,
+ }
+}
+
+// ContextualLogger determines whether the logger passed to
+// SetLoggerWithOptions may also get called directly. Such a logger cannot rely
+// on verbosity checking in klog.
+func ContextualLogger(enabled bool) LoggerOption {
+ return func(o *loggerOptions) {
+ o.contextualLogger = enabled
+ }
+}
+
+// FlushLogger provides a callback for flushing data buffered by the logger.
+func FlushLogger(flush func()) LoggerOption {
+ return func(o *loggerOptions) {
+ o.flush = flush
+ }
+}
+
+// WriteKlogBuffer sets a callback that will be invoked by klog to write output
+// produced by non-structured log calls like Infof.
+//
+// The buffer will contain exactly the same data that klog normally would write
+// into its own output stream(s). In particular this includes the header, if
+// klog is configured to write one. The callback then can divert that data into
+// its own output streams. The buffer may or may not end in a line break.
+//
+// Without such a callback, klog will call the logger's Info or Error method
+// with just the message string (i.e. no header).
+func WriteKlogBuffer(write func([]byte)) LoggerOption {
+ return func(o *loggerOptions) {
+ o.writeKlogBuffer = write
+ }
+}
+
+// LoggerOption implements the functional parameter paradigm for
+// SetLoggerWithOptions.
+type LoggerOption func(o *loggerOptions)
+
+type loggerOptions struct {
+ contextualLogger bool
+ flush func()
+ writeKlogBuffer func([]byte)
+}
+
+// logWriter combines a logger (always set) with a write callback (optional).
+type logWriter struct {
+ Logger
+ writeKlogBuffer func([]byte)
+}
+
+// ClearLogger removes a backing Logger implementation if one was set earlier
+// with SetLogger.
+//
+// Modifying the logger is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func ClearLogger() {
+ logging.logger = nil
+ logging.loggerOptions = loggerOptions{}
+}
+
+// EnableContextualLogging controls whether contextual logging is enabled.
+// By default it is enabled. When disabled, FromContext avoids looking up
+// the logger in the context and always returns the global logger.
+// LoggerWithValues, LoggerWithName, and NewContext become no-ops
+// and return their input logger or context unchanged. This may be useful
+// to avoid the additional overhead of contextual logging.
+//
+// This must be called during initialization before goroutines are started.
+func EnableContextualLogging(enabled bool) {
+ logging.contextualLoggingEnabled = enabled
+}
+
+// FromContext retrieves a logger set by the caller or, if not set,
+// falls back to the program's global logger (a Logger instance or klog
+// itself).
+func FromContext(ctx context.Context) Logger {
+ if logging.contextualLoggingEnabled {
+ if logger, err := logr.FromContext(ctx); err == nil {
+ return logger
+ }
+ }
+
+ return Background()
+}
+
+// TODO can be used as a last resort by code that has no means of
+// receiving a logger from its caller. FromContext or an explicit logger
+// parameter should be used instead.
+func TODO() Logger {
+ return Background()
+}
+
+// Background retrieves the fallback logger. It should not be called before
+// the program has initialized that logger, and it should not be used by code
+// that could instead receive a logger via its parameters. TODO can be used as
+// a temporary solution for such code.
+func Background() Logger {
+ if logging.loggerOptions.contextualLogger {
+ // Is non-nil because logging.loggerOptions.contextualLogger is
+ // only true if a logger was set.
+ return logging.logger.Logger
+ }
+
+ return klogLogger
+}
+
+// LoggerWithValues returns logger.WithValues(...kv) when
+// contextual logging is enabled, otherwise the logger.
+func LoggerWithValues(logger Logger, kv ...interface{}) Logger {
+ if logging.contextualLoggingEnabled {
+ return logger.WithValues(kv...)
+ }
+ return logger
+}
+
+// LoggerWithName returns logger.WithName(name) when contextual logging is
+// enabled, otherwise the logger.
+func LoggerWithName(logger Logger, name string) Logger {
+ if logging.contextualLoggingEnabled {
+ return logger.WithName(name)
+ }
+ return logger
+}
+
+// NewContext returns logr.NewContext(ctx, logger) when
+// contextual logging is enabled, otherwise ctx.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ if logging.contextualLoggingEnabled {
+ return logr.NewContext(ctx, logger)
+ }
+ return ctx
+}
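For orientation, a minimal sketch of how the contextual-logging API above is typically wired up: a logr backend is installed with SetLoggerWithOptions, a request-scoped logger is attached to a context with NewContext, and callees retrieve it with FromContext. The funcr backend and the keys/values are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr/funcr"
	"k8s.io/klog/v2"
)

func handle(ctx context.Context) {
	// Retrieve the logger set by the caller; falls back to the global logger.
	logger := klog.FromContext(ctx)
	logger.Info("handling request")
}

func main() {
	// A trivial logr backend that just prints formatted entries.
	backend := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	// Route klog output to the backend; ContextualLogger(true) also allows the
	// backend to be handed out directly via Background/FromContext.
	klog.SetLoggerWithOptions(backend, klog.ContextualLogger(true))
	defer klog.ClearLogger()

	// Attach a named, request-scoped logger to the context.
	reqLogger := klog.LoggerWithValues(klog.LoggerWithName(klog.Background(), "request"), "id", 42)
	ctx := klog.NewContext(context.Background(), reqLogger)

	handle(ctx)
}
```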
diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go
new file mode 100644
index 0000000000..320a147728
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/exit.go
@@ -0,0 +1,69 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package klog
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+var (
+
+ // ExitFlushTimeout is the timeout that klog has traditionally used during
+ // calls like Fatal or Exit when flushing log data right before exiting.
+ // Applications that replace those calls and do not have some specific
+ // requirements like "exit immediately" can use this value as parameter
+ // for FlushAndExit.
+ //
+ // Can be set for testing purpose or to change the application's
+ // default.
+ ExitFlushTimeout = 10 * time.Second
+
+ // OsExit is the function called by FlushAndExit to terminate the program.
+ //
+ // Can be set for testing purpose or to change the application's
+ // default behavior. Note that the function should not simply return
+ // because callers of functions like Fatal will not expect that.
+ OsExit = os.Exit
+)
+
+// FlushAndExit flushes log data for a certain amount of time and then calls
+// os.Exit. Combined with some logging call it provides a replacement for
+// traditional calls like Fatal or Exit.
+func FlushAndExit(flushTimeout time.Duration, exitCode int) {
+ timeoutFlush(flushTimeout)
+ OsExit(exitCode)
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first. This is needed because the hooks invoked
+// by Flush may deadlock when klog.Fatal is called from a hook that holds
+// a lock. Flushing also might take too long.
+func timeoutFlush(timeout time.Duration) {
+ done := make(chan bool, 1)
+ go func() {
+ Flush() // calls logging.lockAndFlushAll()
+ done <- true
+ }()
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
+ }
+}
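A short sketch of the intended replacement pattern for Fatal-style calls: log the error, then flush and exit explicitly. The error message and exit code are illustrative.

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func run() error {
	return errors.New("config file missing")
}

func main() {
	if err := run(); err != nil {
		klog.ErrorS(err, "initialization failed")
		// Flush buffered log data for up to ExitFlushTimeout, then exit(1).
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}
	klog.Info("running")
	klog.Flush()
}
```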
diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go
new file mode 100644
index 0000000000..602c3ed9e6
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/imports.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "github.com/go-logr/logr"
+)
+
+// The reason for providing these aliases is to allow code to work with logr
+// without directly importing it.
+
+// Logger in this package is exactly the same as logr.Logger.
+type Logger = logr.Logger
+
+// LogSink in this package is exactly the same as logr.LogSink.
+type LogSink = logr.LogSink
+
+// RuntimeInfo in this package is exactly the same as logr.RuntimeInfo.
+type RuntimeInfo = logr.RuntimeInfo
+
+var (
+ // New is an alias for logr.New.
+ New = logr.New
+)
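The point of these aliases is that callers can accept a klog.Logger in their own APIs without importing go-logr directly; a small sketch (function names illustrative):

```go
package main

import (
	"k8s.io/klog/v2"
)

// process depends only on k8s.io/klog/v2; klog.Logger is an alias for logr.Logger.
func process(logger klog.Logger, items int) {
	logger.V(1).Info("processing", "items", items)
}

func main() {
	process(klog.Background(), 3)
}
```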
diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
new file mode 100644
index 0000000000..f325ded5e9
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
@@ -0,0 +1,176 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package buffer provides a cache for bytes.Buffer instances that can be reused
+// to avoid frequent allocation and deallocation. It also has utility code
+// for log header formatting that uses these buffers.
+package buffer
+
+import (
+ "bytes"
+ "os"
+ "sync"
+ "time"
+
+ "k8s.io/klog/v2/internal/severity"
+)
+
+var (
+ // Pid is inserted into log headers. Can be overridden for tests.
+ Pid = os.Getpid()
+)
+
+// Buffer holds a single bytes.Buffer for reuse. The zero value is ready for
+// use. It also provides some helper methods for output formatting.
+type Buffer struct {
+ bytes.Buffer
+ Tmp [64]byte // temporary byte array for creating headers.
+ next *Buffer
+}
+
+var buffers = sync.Pool{
+ New: func() interface{} {
+ return new(Buffer)
+ },
+}
+
+// GetBuffer returns a new, ready-to-use buffer.
+func GetBuffer() *Buffer {
+ b := buffers.Get().(*Buffer)
+ b.Reset()
+ return b
+}
+
+// PutBuffer returns a buffer to the free list.
+func PutBuffer(b *Buffer) {
+ if b.Len() >= 256 {
+ // Let big buffers die a natural death, without relying on
+ // sync.Pool behavior. The documentation implies that items may
+ // get deallocated while stored there ("If the Pool holds the
+ // only reference when this [= be removed automatically]
+ // happens, the item might be deallocated."), but
+ // https://github.com/golang/go/issues/23199 leans more towards
+ // having such a size limit.
+ return
+ }
+
+ buffers.Put(b)
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i].
+func (buf *Buffer) twoDigits(i, d int) {
+ buf.Tmp[i+1] = digits[d%10]
+ d /= 10
+ buf.Tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.Tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *Buffer) nDigits(n, i, d int, pad byte) {
+ j := n - 1
+ for ; j >= 0 && d > 0; j-- {
+ buf.Tmp[i+j] = digits[d%10]
+ d /= 10
+ }
+ for ; j >= 0; j-- {
+ buf.Tmp[i+j] = pad
+ }
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i].
+func (buf *Buffer) someDigits(i, d int) int {
+ // Print into the top, then copy down. We know there's space for at least
+ // a 10-digit number.
+ j := len(buf.Tmp)
+ for {
+ j--
+ buf.Tmp[j] = digits[d%10]
+ d /= 10
+ if d == 0 {
+ break
+ }
+ }
+ return copy(buf.Tmp[i:], buf.Tmp[j:])
+}
+
+// FormatHeader formats a log header using the provided file name and line number
+// and writes it into the buffer.
+func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
+ if line < 0 {
+ line = 0 // not a real line number, but acceptable to someDigits
+ }
+ if s > severity.FatalLog {
+ s = severity.InfoLog // for safety.
+ }
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.Tmp[0] = severity.Char[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.Tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.Tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.Tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.Tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.Tmp[21] = ' '
+ buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID
+ buf.Tmp[29] = ' '
+ buf.Write(buf.Tmp[:30])
+ buf.WriteString(file)
+ buf.Tmp[0] = ':'
+ n := buf.someDigits(1, line)
+ buf.Tmp[n+1] = ']'
+ buf.Tmp[n+2] = ' '
+ buf.Write(buf.Tmp[:n+3])
+}
+
+// SprintHeader formats a log header and returns a string. This is a simpler
+// version of FormatHeader for use in ktesting.
+func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
+ if s > severity.FatalLog {
+ s = severity.InfoLog // for safety.
+ }
+
+ // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+ // It's worth about 3X. Fprintf is hard.
+ _, month, day := now.Date()
+ hour, minute, second := now.Clock()
+ // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+ buf.Tmp[0] = severity.Char[s]
+ buf.twoDigits(1, int(month))
+ buf.twoDigits(3, day)
+ buf.Tmp[5] = ' '
+ buf.twoDigits(6, hour)
+ buf.Tmp[8] = ':'
+ buf.twoDigits(9, minute)
+ buf.Tmp[11] = ':'
+ buf.twoDigits(12, second)
+ buf.Tmp[14] = '.'
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+ buf.Tmp[21] = ']'
+ return string(buf.Tmp[:22])
+}
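Since internal/buffer cannot be imported from outside klog, here is a standalone sketch of the same sync.Pool reuse pattern it implements, including the "drop oversized buffers" rule from PutBuffer (the 256-byte cutoff mirrors the code above; everything else is illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func getBuffer() *bytes.Buffer {
	b := pool.Get().(*bytes.Buffer)
	b.Reset() // ready for reuse
	return b
}

func putBuffer(b *bytes.Buffer) {
	if b.Len() >= 256 {
		// Let big buffers be garbage collected instead of pinning their
		// memory in the pool.
		return
	}
	pool.Put(b)
}

func main() {
	b := getBuffer()
	b.WriteString("I0102 15:04:05.000000    42 demo.go:10] hello")
	fmt.Println(b.String())
	putBuffer(b)
}
```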
diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md
new file mode 100644
index 0000000000..03d692c8f8
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/README.md
@@ -0,0 +1,7 @@
+# Clock
+
+This package provides an interface for time-based operations. It allows
+mocking time for testing.
+
+This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular
+dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog).
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go
new file mode 100644
index 0000000000..b8b6af5c81
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import "time"
+
+// PassiveClock allows for injecting fake or real clocks into code
+// that needs to read the current time but does not support scheduling
+// activity in the future.
+type PassiveClock interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+}
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+ PassiveClock
+ // After returns the channel of a new Timer.
+ // This method does not allow to free/GC the backing timer before it fires. Use
+ // NewTimer instead.
+ After(d time.Duration) <-chan time.Time
+ // NewTimer returns a new Timer.
+ NewTimer(d time.Duration) Timer
+ // Sleep sleeps for the provided duration d.
+ // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+ Sleep(d time.Duration)
+ // Tick returns the channel of a new Ticker.
+ // This method does not allow to free/GC the backing ticker. Use
+ // NewTicker from WithTicker instead.
+ Tick(d time.Duration) <-chan time.Time
+}
+
+// WithTicker allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type WithTicker interface {
+ Clock
+ // NewTicker returns a new Ticker.
+ NewTicker(time.Duration) Ticker
+}
+
+// WithDelayedExecution allows for injecting fake or real clocks into
+// code that needs to make use of AfterFunc functionality.
+type WithDelayedExecution interface {
+ Clock
+ // AfterFunc executes f in its own goroutine after waiting
+ // for d duration and returns a Timer whose channel can be
+ // closed by calling Stop() on the Timer.
+ AfterFunc(d time.Duration, f func()) Timer
+}
+
+// WithTickerAndDelayedExecution allows for injecting fake or real clocks
+// into code that needs Ticker and AfterFunc functionality
+type WithTickerAndDelayedExecution interface {
+ WithTicker
+ // AfterFunc executes f in its own goroutine after waiting
+ // for d duration and returns a Timer whose channel can be
+ // closed by calling Stop() on the Timer.
+ AfterFunc(d time.Duration, f func()) Timer
+}
+
+// Ticker defines the Ticker interface.
+type Ticker interface {
+ C() <-chan time.Time
+ Stop()
+}
+
+var _ = WithTicker(RealClock{})
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+ return time.Since(ts)
+}
+
+// After is the same as time.After(d).
+// This method does not allow to free/GC the backing timer before it fires. Use
+// NewTimer instead.
+func (RealClock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+// NewTimer is the same as time.NewTimer(d)
+func (RealClock) NewTimer(d time.Duration) Timer {
+ return &realTimer{
+ timer: time.NewTimer(d),
+ }
+}
+
+// AfterFunc is the same as time.AfterFunc(d, f).
+func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
+ return &realTimer{
+ timer: time.AfterFunc(d, f),
+ }
+}
+
+// Tick is the same as time.Tick(d)
+// This method does not allow to free/GC the backing ticker. Use
+// NewTicker instead.
+func (RealClock) Tick(d time.Duration) <-chan time.Time {
+ return time.Tick(d)
+}
+
+// NewTicker returns a new Ticker.
+func (RealClock) NewTicker(d time.Duration) Ticker {
+ return &realTicker{
+ ticker: time.NewTicker(d),
+ }
+}
+
+// Sleep is the same as time.Sleep(d)
+// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
+func (RealClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+ C() <-chan time.Time
+ Stop() bool
+ Reset(d time.Duration) bool
+}
+
+var _ = Timer(&realTimer{})
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+ timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+ return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+ return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+ return r.timer.Reset(d)
+}
+
+type realTicker struct {
+ ticker *time.Ticker
+}
+
+func (r *realTicker) C() <-chan time.Time {
+ return r.ticker.C
+}
+
+func (r *realTicker) Stop() {
+ r.ticker.Stop()
+}
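The value of these interfaces is that time can be injected: production code uses RealClock while tests pass a deterministic fake. Since the package is internal, the sketch below declares a minimal local equivalent of PassiveClock; the fake-clock type and the chosen timestamps are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// passiveClock matches the PassiveClock interface defined above.
type passiveClock interface {
	Now() time.Time
	Since(time.Time) time.Duration
}

type realClock struct{}

func (realClock) Now() time.Time                  { return time.Now() }
func (realClock) Since(t time.Time) time.Duration { return time.Since(t) }

// fakeClock returns a fixed instant, making time-dependent code deterministic.
type fakeClock struct{ now time.Time }

func (f fakeClock) Now() time.Time                  { return f.now }
func (f fakeClock) Since(t time.Time) time.Duration { return f.now.Sub(t) }

func ageOf(c passiveClock, start time.Time) time.Duration {
	return c.Since(start)
}

func main() {
	start := time.Date(2023, 5, 1, 12, 0, 0, 0, time.UTC)
	fake := fakeClock{now: start.Add(90 * time.Second)}
	fmt.Println("fake age:", ageOf(fake, start)) // always 1m30s
	fmt.Println("real age:", ageOf(realClock{}, start))
}
```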
diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
new file mode 100644
index 0000000000..f27bd14472
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go
@@ -0,0 +1,42 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package dbg provides some helper code for call traces.
+package dbg
+
+import (
+ "runtime"
+)
+
+// Stacks is a wrapper for runtime.Stack that attempts to recover the data for
+// all goroutines or the calling one.
+func Stacks(all bool) []byte {
+ // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+ n := 10000
+ if all {
+ n = 100000
+ }
+ var trace []byte
+ for i := 0; i < 5; i++ {
+ trace = make([]byte, n)
+ nbytes := runtime.Stack(trace, all)
+ if nbytes < len(trace) {
+ return trace[:nbytes]
+ }
+ n *= 2
+ }
+ return trace
+}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
new file mode 100644
index 0000000000..1dc81a15fa
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
@@ -0,0 +1,346 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/go-logr/logr"
+)
+
+type textWriter interface {
+ WriteText(*bytes.Buffer)
+}
+
+// WithValues implements LogSink.WithValues. The old key/value pairs are
+// assumed to be well-formed, the new ones are checked and padded if
+// necessary. It returns a new slice.
+func WithValues(oldKV, newKV []interface{}) []interface{} {
+ if len(newKV) == 0 {
+ return oldKV
+ }
+ newLen := len(oldKV) + len(newKV)
+ hasMissingValue := newLen%2 != 0
+ if hasMissingValue {
+ newLen++
+ }
+ // The new LogSink must have its own slice.
+ kv := make([]interface{}, 0, newLen)
+ kv = append(kv, oldKV...)
+ kv = append(kv, newKV...)
+ if hasMissingValue {
+ kv = append(kv, missingValue)
+ }
+ return kv
+}
+
+// MergeKVs deduplicates elements provided in two key/value slices.
+//
+// Keys in each slice are expected to be unique, so duplicates can only occur
+// when the first and second slice contain the same key. When that happens, the
+// key/value pair from the second slice is used. The first slice must be well-formed
+// (= even key/value pairs). The second one may have a missing value, in which
+// case the special "missing value" is added to the result.
+func MergeKVs(first, second []interface{}) []interface{} {
+ maxLength := len(first) + (len(second)+1)/2*2
+ if maxLength == 0 {
+ // Nothing to do at all.
+ return nil
+ }
+
+ if len(first) == 0 && len(second)%2 == 0 {
+ // Nothing to be overridden, second slice is well-formed
+ // and can be used directly.
+ return second
+ }
+
+ // Determine which keys are in the second slice so that we can skip
+ // them when iterating over the first one. The code intentionally
+ // favors performance over completeness: we assume that keys are string
+ // constants and thus compare equal when the string values are equal. A
+ // string constant being overridden by, for example, a fmt.Stringer is
+ // not handled.
+ overrides := map[interface{}]bool{}
+ for i := 0; i < len(second); i += 2 {
+ overrides[second[i]] = true
+ }
+ merged := make([]interface{}, 0, maxLength)
+ for i := 0; i+1 < len(first); i += 2 {
+ key := first[i]
+ if overrides[key] {
+ continue
+ }
+ merged = append(merged, key, first[i+1])
+ }
+ merged = append(merged, second...)
+ if len(merged)%2 != 0 {
+ merged = append(merged, missingValue)
+ }
+ return merged
+}
+
+type Formatter struct {
+ AnyToStringHook AnyToStringFunc
+}
+
+type AnyToStringFunc func(v interface{}) string
+
+// MergeAndFormatKVs is a variant of MergeKVs which formats the key/value
+// pairs directly into a buffer.
+func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+ if len(first) == 0 && len(second) == 0 {
+ // Nothing to do at all.
+ return
+ }
+
+ if len(first) == 0 && len(second)%2 == 0 {
+ // Nothing to be overridden, second slice is well-formed
+ // and can be used directly.
+ for i := 0; i < len(second); i += 2 {
+ f.KVFormat(b, second[i], second[i+1])
+ }
+ return
+ }
+
+ // Determine which keys are in the second slice so that we can skip
+ // them when iterating over the first one. The code intentionally
+ // favors performance over completeness: we assume that keys are string
+ // constants and thus compare equal when the string values are equal. A
+ // string constant being overridden by, for example, a fmt.Stringer is
+ // not handled.
+ overrides := map[interface{}]bool{}
+ for i := 0; i < len(second); i += 2 {
+ overrides[second[i]] = true
+ }
+ for i := 0; i < len(first); i += 2 {
+ key := first[i]
+ if overrides[key] {
+ continue
+ }
+ f.KVFormat(b, key, first[i+1])
+ }
+ // Round down.
+ l := len(second)
+ l = l / 2 * 2
+ for i := 1; i < l; i += 2 {
+ f.KVFormat(b, second[i-1], second[i])
+ }
+ if len(second)%2 == 1 {
+ f.KVFormat(b, second[len(second)-1], missingValue)
+ }
+}
+
+func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+ Formatter{}.MergeAndFormatKVs(b, first, second)
+}
+
+const missingValue = "(MISSING)"
+
+// KVListFormat serializes all key/value pairs into the provided buffer.
+// A space gets inserted before the first pair and between each pair.
+func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+ for i := 0; i < len(keysAndValues); i += 2 {
+ var v interface{}
+ k := keysAndValues[i]
+ if i+1 < len(keysAndValues) {
+ v = keysAndValues[i+1]
+ } else {
+ v = missingValue
+ }
+ f.KVFormat(b, k, v)
+ }
+}
+
+func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
+ Formatter{}.KVListFormat(b, keysAndValues...)
+}
+
+// KVFormat serializes one key/value pair into the provided buffer.
+// A space gets inserted before the pair.
+func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
+ b.WriteByte(' ')
+ // Keys are assumed to be well-formed according to
+ // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+ // for the sake of performance. Keys with spaces,
+ // special characters, etc. will break parsing.
+ if sK, ok := k.(string); ok {
+ // Avoid one allocation when the key is a string, which
+ // normally it should be.
+ b.WriteString(sK)
+ } else {
+ b.WriteString(fmt.Sprintf("%s", k))
+ }
+
+ // The type checks are sorted so that more frequently used ones
+ // come first because that is then faster in the common
+ // cases. In Kubernetes, ObjectRef (a Stringer) is more common
+ // than plain strings
+ // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+ switch v := v.(type) {
+ case textWriter:
+ writeTextWriterValue(b, v)
+ case fmt.Stringer:
+ writeStringValue(b, true, StringerToString(v))
+ case string:
+ writeStringValue(b, true, v)
+ case error:
+ writeStringValue(b, true, ErrorToString(v))
+ case logr.Marshaler:
+ value := MarshalerToValue(v)
+ // A marshaler that returns a string is useful for
+ // delayed formatting of complex values. We treat this
+ // case like a normal string. This is useful for
+ // multi-line support.
+ //
+ // We could do this by recursively formatting a value,
+ // but that comes with the risk of infinite recursion
+ // if a marshaler returns itself. Instead we call it
+ // only once and rely on it returning the intended
+ // value directly.
+ switch value := value.(type) {
+ case string:
+ writeStringValue(b, true, value)
+ default:
+ writeStringValue(b, false, f.AnyToString(value))
+ }
+ case []byte:
+ // In https://github.com/kubernetes/klog/pull/237 it was decided
+ // to format byte slices with "%+q". The advantages of that are:
+ // - readable output if the bytes happen to be printable
+ // - non-printable bytes get represented as unicode escape
+ // sequences (\uxxxx)
+ //
+ // The downsides are that we cannot use the faster
+ // strconv.Quote here and that multi-line output is not
+ // supported. If developers know that a byte array is
+ // printable and they want multi-line output, they can
+ // convert the value to string before logging it.
+ b.WriteByte('=')
+ b.WriteString(fmt.Sprintf("%+q", v))
+ default:
+ writeStringValue(b, false, f.AnyToString(v))
+ }
+}
+
+func KVFormat(b *bytes.Buffer, k, v interface{}) {
+ Formatter{}.KVFormat(b, k, v)
+}
+
+// AnyToString is the historic fallback formatter.
+func (f Formatter) AnyToString(v interface{}) string {
+ if f.AnyToStringHook != nil {
+ return f.AnyToStringHook(v)
+ }
+ return fmt.Sprintf("%+v", v)
+}
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
+func StringerToString(s fmt.Stringer) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = s.String()
+	return
+}
+
+// MarshalerToValue invokes a marshaler and catches
+// panics.
+func MarshalerToValue(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = m.MarshalLog()
+	return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur.
+func ErrorToString(err error) (ret string) {
+	defer func() {
+		if err := recover(); err != nil {
+			ret = fmt.Sprintf("<panic: %s>", err)
+		}
+	}()
+	ret = err.Error()
+	return
+}
+
+func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
+	b.WriteRune('=')
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintf(b, `"<panic: %s>"`, err)
+		}
+	}()
+	v.WriteText(b)
+}
+
+func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+ data := []byte(v)
+ index := bytes.IndexByte(data, '\n')
+ if index == -1 {
+ b.WriteByte('=')
+ if quote {
+ // Simple string, quote quotation marks and non-printable characters.
+ b.WriteString(strconv.Quote(v))
+ return
+ }
+ // Non-string with no line breaks.
+ b.WriteString(v)
+ return
+ }
+
+ // Complex multi-line string, show as-is with indention like this:
+ // I... "hello world" key=<
+ // line 1
+ // line 2
+ // >
+ //
+ // Tabs indent the lines of the value while the end of string delimiter
+ // is indented with a space. That has two purposes:
+ // - visual difference between the two for a human reader because indention
+ // will be different
+ // - no ambiguity when some value line starts with the end delimiter
+ //
+ // One downside is that the output cannot distinguish between strings that
+ // end with a line break and those that don't because the end delimiter
+ // will always be on the next line.
+ b.WriteString("=<\n")
+ for index != -1 {
+ b.WriteByte('\t')
+ b.Write(data[0 : index+1])
+ data = data[index+1:]
+ index = bytes.IndexByte(data, '\n')
+ }
+ if len(data) == 0 {
+ // String ended with line break, don't add another.
+ b.WriteString(" >")
+ } else {
+ // No line break at end of last line, write rest of string and
+ // add one.
+ b.WriteByte('\t')
+ b.Write(data)
+ b.WriteString("\n >")
+ }
+}
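How this serialization surfaces through the public structured-logging calls, including the indented block form that writeStringValue produces for multi-line values; a minimal sketch with illustrative keys and messages:

```go
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	// Strings are quoted (key="value"); other values use their default format.
	klog.InfoS("pod synced", "pod", "kube-system/coredns", "attempt", 3)

	// Errors go through ErrorToString and are quoted like strings.
	klog.ErrorS(errors.New("connection refused"), "sync failed", "retries", 2)

	// A value containing newlines is emitted as an indented block:
	//   out=<
	//           line 1
	//           line 2
	//    >
	klog.InfoS("command output", "out", "line 1\nline 2")

	klog.Flush()
}
```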
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
new file mode 100644
index 0000000000..30fa1834f0
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go
@@ -0,0 +1,58 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package severity provides definitions for klog severity (info, warning, ...)
+package severity
+
+import (
+ "strings"
+)
+
+// Severity identifies the sort of log: info, warning etc. The binding to flag.Value
+// is handled in klog.go.
+type Severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+ InfoLog Severity = iota
+ WarningLog
+ ErrorLog
+ FatalLog
+ NumSeverity = 4
+)
+
+// Char contains one shortcut letter per severity level.
+const Char = "IWEF"
+
+// Name contains one name per severity level.
+var Name = []string{
+ InfoLog: "INFO",
+ WarningLog: "WARNING",
+ ErrorLog: "ERROR",
+ FatalLog: "FATAL",
+}
+
+// ByName looks up a severity level by name.
+func ByName(s string) (Severity, bool) {
+ s = strings.ToUpper(s)
+ for i, name := range Name {
+ if name == s {
+ return Severity(i), true
+ }
+ }
+ return 0, false
+}
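ByName is what lets the -stderrthreshold flag accept either a severity name or its numeric value (InfoLog=0 through FatalLog=3). A sketch of setting it programmatically; the log directory is illustrative, and the threshold only matters when logging to files:

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "false") // the threshold has no effect otherwise
	_ = flag.Set("log_dir", "/tmp")      // illustrative directory for the demo
	// "ERROR" is resolved by name via ByName; the numeric form "2" is equivalent.
	_ = flag.Set("stderrthreshold", "ERROR")
	flag.Parse()

	klog.Warning("written to the warning and info log files only")
	klog.Error("written to the log files and mirrored to stderr")
	klog.Flush()
}
```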
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
new file mode 100644
index 0000000000..ecd3f8b690
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/k8s_references.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/go-logr/logr"
+)
+
+// ObjectRef references a kubernetes object
+type ObjectRef struct {
+ Name string `json:"name"`
+ Namespace string `json:"namespace,omitempty"`
+}
+
+func (ref ObjectRef) String() string {
+ if ref.Namespace != "" {
+ var builder strings.Builder
+ builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
+ builder.WriteString(ref.Namespace)
+ builder.WriteRune('/')
+ builder.WriteString(ref.Name)
+ return builder.String()
+ }
+ return ref.Name
+}
+
+func (ref ObjectRef) WriteText(out *bytes.Buffer) {
+ out.WriteRune('"')
+ ref.writeUnquoted(out)
+ out.WriteRune('"')
+}
+
+func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
+ if ref.Namespace != "" {
+ out.WriteString(ref.Namespace)
+ out.WriteRune('/')
+ }
+ out.WriteString(ref.Name)
+}
+
+// MarshalLog ensures that loggers with support for structured output will log
+// as a struct by removing the String method via a custom type.
+func (ref ObjectRef) MarshalLog() interface{} {
+ type or ObjectRef
+ return or(ref)
+}
+
+var _ logr.Marshaler = ObjectRef{}
+
+// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface.
+// This interface may expand in the future, but will always remain a subset of the
+// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface.
+type KMetadata interface {
+ GetName() string
+ GetNamespace() string
+}
+
+// KObj returns ObjectRef from ObjectMeta
+func KObj(obj KMetadata) ObjectRef {
+ if obj == nil {
+ return ObjectRef{}
+ }
+ if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() {
+ return ObjectRef{}
+ }
+
+ return ObjectRef{
+ Name: obj.GetName(),
+ Namespace: obj.GetNamespace(),
+ }
+}
+
+// KRef returns ObjectRef from name and namespace
+func KRef(namespace, name string) ObjectRef {
+ return ObjectRef{
+ Name: name,
+ Namespace: namespace,
+ }
+}
+
+// KObjs returns a slice of ObjectRef from a slice of ObjectMeta.
+//
+// DEPRECATED: Use KObjSlice instead, it has better performance.
+func KObjs(arg interface{}) []ObjectRef {
+ s := reflect.ValueOf(arg)
+ if s.Kind() != reflect.Slice {
+ return nil
+ }
+ objectRefs := make([]ObjectRef, 0, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ if v, ok := s.Index(i).Interface().(KMetadata); ok {
+ objectRefs = append(objectRefs, KObj(v))
+ } else {
+ return nil
+ }
+ }
+ return objectRefs
+}
+
+// KObjSlice takes a slice of objects that implement the KMetadata interface
+// and returns an object that gets logged as a slice of ObjectRef values or a
+// string containing those values, depending on whether the logger prefers text
+// output or structured output.
+//
+// An error string is logged when KObjSlice is not passed a suitable slice.
+//
+// Processing of the argument is delayed until the value actually gets logged,
+// in contrast to KObjs where that overhead is incurred regardless of whether
+// the result is needed.
+func KObjSlice(arg interface{}) interface{} {
+ return kobjSlice{arg: arg}
+}
+
+type kobjSlice struct {
+ arg interface{}
+}
+
+var _ fmt.Stringer = kobjSlice{}
+var _ logr.Marshaler = kobjSlice{}
+
+func (ks kobjSlice) String() string {
+ objectRefs, errStr := ks.process()
+ if errStr != "" {
+ return errStr
+ }
+ return fmt.Sprintf("%v", objectRefs)
+}
+
+func (ks kobjSlice) MarshalLog() interface{} {
+ objectRefs, errStr := ks.process()
+ if errStr != "" {
+ return errStr
+ }
+ return objectRefs
+}
+
+func (ks kobjSlice) process() (objs []interface{}, err string) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as nil.
+		return nil, ""
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
+	}
+	objectRefs := make([]interface{}, 0, s.Len())
+	for i := 0; i < s.Len(); i++ {
+		item := s.Index(i).Interface()
+		if item == nil {
+			objectRefs = append(objectRefs, nil)
+		} else if v, ok := item.(KMetadata); ok {
+			objectRefs = append(objectRefs, KObj(v))
+		} else {
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of KMetadata, got type %T>", item)
+		}
+	}
+	return objectRefs, ""
+}
+
+var nilToken = []byte("<nil>")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as empty slice.
+		out.WriteString("[]")
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{' '})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).writeUnquoted(out)
+		} else {
+			fmt.Fprintf(out, "<KObjSlice needs a slice of KMetadata, got type %T>", item)
+			return
+		}
+	}
+}
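A minimal sketch of logging object references with KObj, KRef and KObjSlice. The pod type below stands in for anything satisfying KMetadata (for example metav1.ObjectMeta); names and namespaces are illustrative.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// pod implements klog.KMetadata.
type pod struct{ name, namespace string }

func (p pod) GetName() string      { return p.name }
func (p pod) GetNamespace() string { return p.namespace }

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	p := pod{name: "coredns", namespace: "kube-system"}

	// Text output renders these as "kube-system/coredns"; structured backends
	// receive separate name/namespace fields via MarshalLog.
	klog.InfoS("syncing", "pod", klog.KObj(p))
	klog.InfoS("syncing", "pod", klog.KRef("kube-system", "coredns"))

	// KObjSlice delays processing of the slice until the entry is emitted.
	klog.InfoS("evicting", "pods", klog.KObjSlice([]pod{p, {name: "kube-proxy", namespace: "kube-system"}}))

	klog.Flush()
}
```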
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
new file mode 100644
index 0000000000..466eeaf265
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -0,0 +1,1702 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+// klog.Info("Prepare to repel boarders")
+//
+// klog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+// if klog.V(2) {
+// klog.Info("Starting transaction...")
+// }
+//
+// klog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to standard error.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+// -logtostderr=true
+// Logs are written to standard error instead of to files.
+// This shortcuts most of the usual output routing:
+// -alsologtostderr, -stderrthreshold and -log_dir have no
+// effect and output redirection at runtime with SetOutput is
+// ignored.
+// -alsologtostderr=false
+// Logs are written to standard error as well as to files.
+// -stderrthreshold=ERROR
+// Log events at or above this severity are logged to standard
+// error as well as to files.
+// -log_dir=""
+// Log files will be written to this directory instead of the
+// default temporary directory.
+//
+// Other flags provide aids to debugging.
+//
+// -log_backtrace_at=""
+// When set to a file and line number holding a logging statement,
+// such as
+// -log_backtrace_at=gopherflakes.go:234
+// a stack trace will be written to the Info log whenever execution
+// hits that statement. (Unlike with -vmodule, the ".go" must be
+// present.)
+// -v=0
+// Enable V-leveled logging at the specified level.
+// -vmodule=""
+// The syntax of the argument is a comma-separated list of pattern=N,
+// where pattern is a literal file name (minus the ".go" suffix) or
+// "glob" pattern and N is a V level. For instance,
+// -vmodule=gopher*=3
+// sets the V level to 3 in all Go files whose names begin "gopher".
+package klog
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ stdLog "log"
+ "math"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "k8s.io/klog/v2/internal/buffer"
+ "k8s.io/klog/v2/internal/clock"
+ "k8s.io/klog/v2/internal/dbg"
+ "k8s.io/klog/v2/internal/serialize"
+ "k8s.io/klog/v2/internal/severity"
+)
+
+// severityValue identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severityValue struct {
+ severity.Severity
+}
+
+// get returns the value of the severity.
+func (s *severityValue) get() severity.Severity {
+ return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity)))
+}
+
+// set sets the value of the severity.
+func (s *severityValue) set(val severity.Severity) {
+ atomic.StoreInt32((*int32)(&s.Severity), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severityValue) String() string {
+ return strconv.FormatInt(int64(s.Severity), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (s *severityValue) Get() interface{} {
+ return s.Severity
+}
+
+// Set is part of the flag.Value interface.
+func (s *severityValue) Set(value string) error {
+ var threshold severity.Severity
+ // Is it a known name?
+ if v, ok := severity.ByName(value); ok {
+ threshold = v
+ } else {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ threshold = severity.Severity(v)
+ }
+ logging.stderrThreshold.set(threshold)
+ return nil
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+ lines int64
+ bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+ return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+ return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+ Info, Warning, Error OutputStats
+}
+
+var severityStats = [severity.NumSeverity]*OutputStats{
+ severity.InfoLog: &Stats.Info,
+ severity.WarningLog: &Stats.Warning,
+ severity.ErrorLog: &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+ return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+ atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+ return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(Level(v), logging.vmodule.filter, false)
+ return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+ filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+ pattern string
+ literal bool // The pattern is a literal string
+ level Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+ if m.literal {
+ return file == m.pattern
+ }
+ match, _ := filepath.Match(m.pattern, file)
+ return match
+}
+
+func (m *moduleSpec) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return m.serialize()
+}
+
+func (m *moduleSpec) serialize() string {
+ var b bytes.Buffer
+ for i, f := range m.filter {
+ if i > 0 {
+ b.WriteRune(',')
+ }
+ fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+ }
+ return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+ return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Set sets the module value.
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+ filter, err := parseModuleSpec(value)
+ if err != nil {
+ return err
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ logging.setVState(logging.verbosity, filter, true)
+ return nil
+}
+
+func parseModuleSpec(value string) ([]modulePat, error) {
+ var filter []modulePat
+ for _, pat := range strings.Split(value, ",") {
+ if len(pat) == 0 {
+ // Empty strings such as from a trailing comma can be ignored.
+ continue
+ }
+ patLev := strings.Split(pat, "=")
+ if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+ return nil, errVmoduleSyntax
+ }
+ pattern := patLev[0]
+ v, err := strconv.ParseInt(patLev[1], 10, 32)
+ if err != nil {
+ return nil, errors.New("syntax error: expect comma-separated list of filename=N")
+ }
+ if v < 0 {
+ return nil, errors.New("negative value for vmodule level")
+ }
+ if v == 0 {
+ continue // Ignore. It's harmless but no point in paying the overhead.
+ }
+ // TODO: check syntax of filter?
+ filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+ }
+ return filter, nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+ return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+ file string
+ line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+ return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+ if t.line != line {
+ return false
+ }
+ if i := strings.LastIndex(file, "/"); i >= 0 {
+ file = file[i+1:]
+ }
+ return t.file == file
+}
+
+func (t *traceLocation) String() string {
+ // Lock because the type is not atomic. TODO: clean this up.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported
+func (t *traceLocation) Get() interface{} {
+ return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Set sets the backtrace value.
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+ if value == "" {
+ // Unset.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = 0
+ t.file = ""
+ return nil
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+var logging loggingT
+var commandLine flag.FlagSet
+
+// init sets up the defaults and creates command line flags.
+func init() {
+ commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
+ commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)")
+ commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
+ "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
+ "If the value is 0, the maximum file size is unlimited.")
+ commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
+ commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)")
+ logging.setVState(0, nil, false)
+ commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity")
+ commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages")
+ commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
+ commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
+ commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
+ logging.stderrThreshold = severityValue{
+ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
+ }
+ commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
+ commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ logging.settings.contextualLoggingEnabled = true
+ logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil)
+}
+
+// InitFlags is for explicitly initializing the flags.
+// It may get called repeatedly for different flagsets, but not
+// twice for the same one. It may be called concurrently
+// with other goroutines that use klog. However, only some flags
+// may get set concurrently (see implementation).
+func InitFlags(flagset *flag.FlagSet) {
+ if flagset == nil {
+ flagset = flag.CommandLine
+ }
+
+ commandLine.VisitAll(func(f *flag.Flag) {
+ flagset.Var(f.Value, f.Name, f.Usage)
+ })
+}
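+
+// Illustrative sketch (not part of the upstream sources): a client package
+// that imports this module as "klog" might register the flags defined above
+// on its own FlagSet like this; the flag values are hypothetical examples.
+//
+//	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
+//	klog.InitFlags(fs)
+//	_ = fs.Parse([]string{
+//		"-v=2",                           // enable klog.V(2) and below
+//		"-vmodule=server*=4",             // extra verbosity for matching files
+//		"-log_backtrace_at=server.go:41", // emit a stack trace at this file:line
+//	})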
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// settings collects global settings.
+type settings struct {
+ // contextualLoggingEnabled controls whether contextual logging is
+ // active. Disabling it may have some small performance benefit.
+ contextualLoggingEnabled bool
+
+ // logger is the global Logger chosen by users of klog, nil if
+ // none is available.
+ logger *logWriter
+
+ // loggerOptions contains the options that were supplied for
+ // globalLogger.
+ loggerOptions loggerOptions
+
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severityValue // The -stderrthreshold flag.
+
+ // Access to all of the following fields must be protected via a mutex.
+
+ // file holds writer for each of the log types.
+ file [severity.NumSeverity]flushSyncWriter
+ // flushInterval is the interval for periodic flushing. If zero,
+ // the global default will be used.
+ flushInterval time.Duration
+
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using sync.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+
+ // If non-empty, overrides the choice of directory in which to write logs.
+ // See createLogDirs for the full list of possible destinations.
+ logDir string
+
+ // If non-empty, specifies the path of the file to which logs are written. Mutually exclusive
+ // with the log_dir option.
+ logFile string
+
+ // When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it does, the
+ // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile.
+ logFileMaxSizeMB uint64
+
+ // If true, do not add the prefix headers, useful when used with SetOutput
+ skipHeaders bool
+
+ // If true, do not add the headers to log files
+ skipLogHeaders bool
+
+ // If true, add the file directory to the header
+ addDirHeader bool
+
+ // If true, messages will not be propagated to lower severity log levels
+ oneOutput bool
+
+ // If set, all output will be filtered through the filter.
+ filter LogFilter
+}
+
+// deepCopy creates a copy that doesn't share anything with the original
+// instance.
+func (s settings) deepCopy() settings {
+ // vmodule is a slice and would be shared, so we have to copy it.
+ filter := make([]modulePat, len(s.vmodule.filter))
+ for i := range s.vmodule.filter {
+ filter[i] = s.vmodule.filter[i]
+ }
+ s.vmodule.filter = filter
+
+ if s.logger != nil {
+ logger := *s.logger
+ s.logger = &logger
+ }
+
+ return s
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ settings
+
+ // flushD holds a flushDaemon that frequently flushes log file buffers.
+ // Uses its own mutex.
+ flushD *flushDaemon
+
+ // mu protects the remaining elements of this structure and the fields
+ // in settings which need a mutex lock.
+ mu sync.Mutex
+
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+}
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+ // Turn verbosity off so V will not fire while we are in transition.
+ l.verbosity.set(0)
+ // Ditto for filter length.
+ atomic.StoreInt32(&l.filterLength, 0)
+
+ // Set the new filters and wipe the pc->Level map if the filter has changed.
+ if setFilter {
+ l.vmodule.filter = filter
+ l.vmap = make(map[uintptr]Level)
+ }
+
+ // Things are consistent now, so enable filtering and verbosity.
+ // They are enabled in order opposite to that in V.
+ atomic.StoreInt32(&l.filterLength, int32(len(filter)))
+ l.verbosity.set(verbosity)
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+// CaptureState gathers information about all current klog settings.
+// The result can be used to restore those settings.
+func CaptureState() State {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ return &state{
+ settings: logging.settings.deepCopy(),
+ flushDRunning: logging.flushD.isRunning(),
+ maxSize: MaxSize,
+ }
+}
+
+// State stores a snapshot of klog settings. It gets created with CaptureState
+// and can be used to restore the entire state. Modifying individual settings
+// is supported via the command line flags.
+type State interface {
+ // Restore restores the entire state. It may get called more than once.
+ Restore()
+}
+
+type state struct {
+ settings
+
+ flushDRunning bool
+ maxSize uint64
+}
+
+func (s *state) Restore() {
+ // This needs to be done before mutex locking.
+ if s.flushDRunning && !logging.flushD.isRunning() {
+ // This is not quite accurate: StartFlushDaemon might
+ // have been called with some different interval.
+ interval := s.flushInterval
+ if interval == 0 {
+ interval = flushInterval
+ }
+ logging.flushD.run(interval)
+ } else if !s.flushDRunning && logging.flushD.isRunning() {
+ logging.flushD.stop()
+ }
+
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
+ logging.settings = s.settings
+ logging.setVState(s.verbosity, s.vmodule.filter, true)
+ MaxSize = s.maxSize
+}
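+
+// Illustrative sketch of the intended CaptureState/Restore pairing, e.g. in a
+// test that temporarily changes klog settings (assumes the package is
+// imported as "klog"):
+//
+//	state := klog.CaptureState()
+//	defer state.Restore()
+//	// ... change flags, call klog.SetOutput, klog.StartFlushDaemon, etc. ...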
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many additional stack frames to skip when identifying the source line reported in the log message.
+
+Log lines have this form:
+
+ Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+
+where the fields are defined as follows:
+
+ L A single character, representing the log level (e.g. 'I' for INFO)
+ mm The month (zero padded; ie May is '05')
+ dd The day (zero padded)
+ hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
+ threadid The space-padded thread ID as returned by GetTID()
+ file The file name
+ line The line number
+ msg The user-supplied message
+*/
+func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) {
+ _, file, line, ok := runtime.Caller(3 + depth)
+ if !ok {
+ file = "???"
+ line = 1
+ } else {
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ path := file
+ file = path[slash+1:]
+ if l.addDirHeader {
+ if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
+ file = path[dirsep+1:]
+ }
+ }
+ }
+ }
+ return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
+ buf := buffer.GetBuffer()
+ if l.skipHeaders {
+ return buf
+ }
+ now := timeNow()
+ buf.FormatHeader(s, file, line, now)
+ return buf
+}
+
+func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
+ l.printlnDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // If a logger is set and doesn't support writing a formatted buffer,
+ // we clear the generated header as we rely on the backing
+ // logger implementation to print headers.
+ if logger != nil && logger.writeKlogBuffer == nil {
+ buffer.PutBuffer(buf)
+ buf = buffer.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprintln(buf, args...)
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
+ l.printDepth(s, logger, filter, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // If a logger is set and doesn't support writing a formatted buffer,
+ // we clear the generated header as we rely on the backing
+ // logger implementation to print headers.
+ if logger != nil && logger.writeKlogBuffer == nil {
+ buffer.PutBuffer(buf)
+ buf = buffer.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprint(buf, args...)
+ if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) {
+ l.printfDepth(s, logger, filter, 1, format, args...)
+}
+
+func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) {
+ buf, file, line := l.header(s, depth)
+ // If a logger is set and doesn't support writing a formatted buffer,
+ // we clear the generated header as we rely on the backing
+ // logger implementation to print headers.
+ if logger != nil && logger.writeKlogBuffer == nil {
+ buffer.PutBuffer(buf)
+ buf = buffer.GetBuffer()
+ }
+ if filter != nil {
+ format, args = filter.FilterF(format, args)
+ }
+ fmt.Fprintf(buf, format, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, depth, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number. If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
+ buf := l.formatHeader(s, file, line)
+ // If a logger is set and doesn't support writing a formatted buffer,
+ // we clear the generated header as we rely on the backing
+ // logger implementation to print headers.
+ if logger != nil && logger.writeKlogBuffer == nil {
+ buffer.PutBuffer(buf)
+ buf = buffer.GetBuffer()
+ }
+ if filter != nil {
+ args = filter.Filter(args)
+ }
+ fmt.Fprint(buf, args...)
+ if buf.Bytes()[buf.Len()-1] != '\n' {
+ buf.WriteByte('\n')
+ }
+ l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr)
+}
+
+// errorS calls logger.Error if a logger is specified; otherwise it outputs through the logging module.
+func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+ if filter != nil {
+ msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+ }
+ if logger != nil {
+ logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...)
+ return
+ }
+ l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...)
+}
+
+// infoS calls logger.Info if a logger is specified; otherwise it outputs through the logging module.
+func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+ if filter != nil {
+ msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+ }
+ if logger != nil {
+ logger.WithCallDepth(depth+2).Info(msg, keysAndValues...)
+ return
+ }
+ l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...)
+}
+
+// printS is called from infoS and errorS if no logger is specified.
+// The log severity is set by s.
+func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
+ // Only create a new buffer if we don't have one cached.
+ b := buffer.GetBuffer()
+ // The message is always quoted, even if it contains line breaks.
+ // If developers want multi-line output, they should use a small, fixed
+ // message and put the multi-line output into a value.
+ b.WriteString(strconv.Quote(msg))
+ if err != nil {
+ serialize.KVListFormat(&b.Buffer, "err", err)
+ }
+ serialize.KVListFormat(&b.Buffer, keysAndValues...)
+ l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
+ // Make the buffer available for reuse.
+ buffer.PutBuffer(b)
+}
+
+// redirectBuffer is used to set an alternate destination for the logs
+type redirectBuffer struct {
+ w io.Writer
+}
+
+func (rb *redirectBuffer) Sync() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Flush() error {
+ return nil
+}
+
+func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
+ return rb.w.Write(bytes)
+}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ for s := severity.FatalLog; s >= severity.InfoLog; s-- {
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[s] = rb
+ }
+}
+
+// SetOutputBySeverity sets the output destination for a specific severity.
+func SetOutputBySeverity(name string, w io.Writer) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ sev, ok := severity.ByName(name)
+ if !ok {
+ panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+ }
+ rb := &redirectBuffer{
+ w: w,
+ }
+ logging.file[sev] = rb
+}
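+
+// Illustrative sketch: capturing log output in a buffer, e.g. in a test.
+// Redirection only takes effect when not logging exclusively to stderr, so
+// -logtostderr=false is assumed here; the bytes.Buffer destination is an
+// arbitrary example.
+//
+//	var buf bytes.Buffer
+//	klog.SetOutput(&buf)                   // all severities
+//	klog.SetOutputBySeverity("INFO", &buf) // or a single severity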
+
+// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
+func LogToStderr(stderr bool) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
+ logging.toStderr = stderr
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
+ var isLocked = true
+ l.mu.Lock()
+ defer func() {
+ if isLocked {
+ // Unlock before returning in case it wasn't already done.
+ l.mu.Unlock()
+ }
+ }()
+
+ if l.traceLocation.isSet() {
+ if l.traceLocation.match(file, line) {
+ buf.Write(dbg.Stacks(false))
+ }
+ }
+ data := buf.Bytes()
+ if logger != nil {
+ if logger.writeKlogBuffer != nil {
+ logger.writeKlogBuffer(data)
+ } else {
+ // TODO: set 'severity' and caller information as structured log info
+ // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
+ if s == severity.ErrorLog {
+ logger.WithCallDepth(depth+3).Error(nil, string(data))
+ } else {
+ logger.WithCallDepth(depth + 3).Info(string(data))
+ }
+ }
+ } else if l.toStderr {
+ os.Stderr.Write(data)
+ } else {
+ if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+ os.Stderr.Write(data)
+ }
+
+ if logging.logFile != "" {
+ // Since we are using a single log file, all of the items in l.file array
+ // will point to the same file, so just use one of them to write data.
+ if l.file[severity.InfoLog] == nil {
+ if err := l.createFiles(severity.InfoLog); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+ l.file[severity.InfoLog].Write(data)
+ } else {
+ if l.file[s] == nil {
+ if err := l.createFiles(s); err != nil {
+ os.Stderr.Write(data) // Make sure the message appears somewhere.
+ l.exit(err)
+ }
+ }
+
+ if l.oneOutput {
+ l.file[s].Write(data)
+ } else {
+ switch s {
+ case severity.FatalLog:
+ l.file[severity.FatalLog].Write(data)
+ fallthrough
+ case severity.ErrorLog:
+ l.file[severity.ErrorLog].Write(data)
+ fallthrough
+ case severity.WarningLog:
+ l.file[severity.WarningLog].Write(data)
+ fallthrough
+ case severity.InfoLog:
+ l.file[severity.InfoLog].Write(data)
+ }
+ }
+ }
+ }
+ if s == severity.FatalLog {
+ // If we got here via Exit rather than Fatal, print no stacks.
+ if atomic.LoadUint32(&fatalNoStacks) > 0 {
+ l.mu.Unlock()
+ isLocked = false
+ timeoutFlush(ExitFlushTimeout)
+ OsExit(1)
+ }
+ // Dump all goroutine stacks before exiting.
+ // First, make sure we see the trace for the current goroutine on standard error.
+ // If -logtostderr has been specified, the loop below will do that anyway
+ // as the first stack in the full dump.
+ if !l.toStderr {
+ os.Stderr.Write(dbg.Stacks(false))
+ }
+
+ // Write the stack trace for all goroutines to the files.
+ trace := dbg.Stacks(true)
+ logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+ for log := severity.FatalLog; log >= severity.InfoLog; log-- {
+ if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+ f.Write(trace)
+ }
+ }
+ l.mu.Unlock()
+ isLocked = false
+ timeoutFlush(ExitFlushTimeout)
+ OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+ }
+ buffer.PutBuffer(buf)
+
+ if stats := severityStats[s]; stats != nil {
+ atomic.AddInt64(&stats.lines, 1)
+ atomic.AddInt64(&stats.bytes, int64(len(data)))
+ }
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Alternatively, exit could be a function rather than a method, but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+ fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+ // If logExitFunc is set, we do that instead of exiting.
+ if logExitFunc != nil {
+ logExitFunc(err)
+ return
+ }
+ l.flushAll()
+ OsExit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+ logger *loggingT
+ *bufio.Writer
+ file *os.File
+ sev severity.Severity
+ nbytes uint64 // The number of bytes written to this file
+ maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
+}
+
+func (sb *syncBuffer) Sync() error {
+ return sb.file.Sync()
+}
+
+// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
+func CalculateMaxSize() uint64 {
+ if logging.logFile != "" {
+ if logging.logFileMaxSizeMB == 0 {
+ // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
+ return math.MaxUint64
+ }
+ // Flag logFileMaxSizeMB is in MB for user convenience.
+ return logging.logFileMaxSizeMB * 1024 * 1024
+ }
+ // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
+ return MaxSize
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+ if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
+ if err := sb.rotateFile(time.Now(), false); err != nil {
+ sb.logger.exit(err)
+ }
+ }
+ n, err = sb.Writer.Write(p)
+ sb.nbytes += uint64(n)
+ if err != nil {
+ sb.logger.exit(err)
+ }
+ return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
+ if sb.file != nil {
+ sb.Flush()
+ sb.file.Close()
+ }
+ var err error
+ sb.file, _, err = create(severity.Name[sb.sev], now, startup)
+ if err != nil {
+ return err
+ }
+ if startup {
+ fileInfo, err := sb.file.Stat()
+ if err != nil {
+ return fmt.Errorf("file stat could not get fileinfo: %v", err)
+ }
+ // Initialize the byte count from the size of the existing file.
+ sb.nbytes = uint64(fileInfo.Size())
+ } else {
+ sb.nbytes = 0
+ }
+ sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+ if sb.logger.skipLogHeaders {
+ return nil
+ }
+
+ // Write header.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+ fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+ fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+ n, err := sb.file.Write(buf.Bytes())
+ sb.nbytes += uint64(n)
+ return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity.Severity) error {
+ interval := l.flushInterval
+ if interval == 0 {
+ interval = flushInterval
+ }
+ l.flushD.run(interval)
+ now := time.Now()
+ // Files are created in decreasing severity order, so as soon as we find one that
+ // has already been created, we can stop.
+ for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- {
+ sb := &syncBuffer{
+ logger: l,
+ sev: s,
+ maxbytes: CalculateMaxSize(),
+ }
+ if err := sb.rotateFile(now, true); err != nil {
+ return err
+ }
+ l.file[s] = sb
+ }
+ return nil
+}
+
+const flushInterval = 5 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+type flushDaemon struct {
+ mu sync.Mutex
+ clock clock.WithTicker
+ flush func()
+ stopC chan struct{}
+ stopDone chan struct{}
+}
+
+// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a
+// clock.RealClock is used.
+func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon {
+ if tickClock == nil {
+ tickClock = clock.RealClock{}
+ }
+ return &flushDaemon{
+ flush: flush,
+ clock: tickClock,
+ }
+}
+
+// run starts a goroutine that periodically calls the daemon's flush function.
+// Calling run on an already running daemon will have no effect.
+func (f *flushDaemon) run(interval time.Duration) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.stopC != nil { // daemon already running
+ return
+ }
+
+ f.stopC = make(chan struct{}, 1)
+ f.stopDone = make(chan struct{}, 1)
+
+ ticker := f.clock.NewTicker(interval)
+ go func() {
+ defer ticker.Stop()
+ defer func() { f.stopDone <- struct{}{} }()
+ for {
+ select {
+ case <-ticker.C():
+ f.flush()
+ case <-f.stopC:
+ f.flush()
+ return
+ }
+ }
+ }()
+}
+
+// stop stops the running flushDaemon and waits until the daemon has shut down.
+// Calling stop on a daemon that isn't running will have no effect.
+func (f *flushDaemon) stop() {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.stopC == nil { // daemon not running
+ return
+ }
+
+ f.stopC <- struct{}{}
+ <-f.stopDone
+
+ f.stopC = nil
+ f.stopDone = nil
+}
+
+// isRunning returns true if the flush daemon is running.
+func (f *flushDaemon) isRunning() bool {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.stopC != nil
+}
+
+// StopFlushDaemon stops the flush daemon, if running, and flushes once.
+// This prevents klog from leaking goroutines on shutdown. After stopping
+// the daemon, you can still manually flush buffers again by calling Flush().
+func StopFlushDaemon() {
+ logging.flushD.stop()
+}
+
+// StartFlushDaemon ensures that the flush daemon runs with the given delay
+// between flush calls. If it is already running, it gets restarted.
+func StartFlushDaemon(interval time.Duration) {
+ StopFlushDaemon()
+ logging.flushD.run(interval)
+}
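+
+// Illustrative shutdown sketch (a common pattern, not mandated by klog):
+// run the flush daemon with a custom interval and make sure its goroutine
+// is stopped, with one final flush, before the program exits.
+//
+//	klog.StartFlushDaemon(10 * time.Second)
+//	defer klog.StopFlushDaemon()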
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+ l.mu.Lock()
+ l.flushAll()
+ l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+ // Flush from fatal down, in case there's trouble flushing.
+ for s := severity.FatalLog; s >= severity.InfoLog; s-- {
+ file := l.file[s]
+ if file != nil {
+ file.Flush() // ignore error
+ file.Sync() // ignore error
+ }
+ }
+ if logging.loggerOptions.flush != nil {
+ logging.loggerOptions.flush()
+ }
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities. Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+ sev, ok := severity.ByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
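+
+// Illustrative sketch: route the standard "log" package's default logger
+// into klog at INFO severity (the message is an arbitrary example).
+//
+//	klog.CopyStandardLogTo("INFO")
+//	log.Print("now emitted as a klog INFO line")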
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity.Severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose struct {
+ enabled bool
+ logger *logWriter
+}
+
+func newVerbose(level Level, b bool) Verbose {
+ if logging.logger == nil {
+ return Verbose{b, nil}
+ }
+ v := logging.logger.V(int(level))
+ return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}}
+}
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a struct of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//
+// if klog.V(2).Enabled() { klog.Info("log this") }
+//
+// or
+//
+// klog.V(2).Info("log this")
+//
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and -vmodule flags; both are off by default. The V call will log if its level
+// is less than or equal to the value of the -v flag, or alternatively if its level is
+// less than or equal to the value of the -vmodule pattern matching the source file
+// containing the call.
+func V(level Level) Verbose {
+ return VDepth(1, level)
+}
+
+// VDepth is a variant of V that accepts a number of stack frames that will be
+// skipped when checking the -vmodule patterns. VDepth(0, level) is equivalent to
+// V(level).
+func VDepth(depth int, level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return newVerbose(level, true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 {
+ // Now we need a proper lock to use the logging structure. The pcs field
+ // is shared so we must lock before accessing it. This is fairly expensive,
+ // but if V logging is enabled we're slow anyway.
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
+ return newVerbose(level, false)
+ }
+ // runtime.Callers returns "return PCs", but we want
+ // to look up the symbolic information for the call,
+ // so subtract 1 from the PC. runtime.CallersFrames
+ // would be cleaner, but allocates.
+ pc := logging.pcs[0] - 1
+ v, ok := logging.vmap[pc]
+ if !ok {
+ v = logging.setV(pc)
+ }
+ return newVerbose(level, v >= level)
+ }
+ return newVerbose(level, false)
+}
+
+// Enabled will return true if this log level is enabled, guarded by the value
+// of v.
+// See the documentation of V for usage.
+func (v Verbose) Enabled() bool {
+ return v.enabled
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+ if v.enabled {
+ logging.print(severity.InfoLog, v.logger, logging.filter, args...)
+ }
+}
+
+// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoDepth(depth int, args ...interface{}) {
+ if v.enabled {
+ logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
+ }
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+ if v.enabled {
+ logging.println(severity.InfoLog, v.logger, logging.filter, args...)
+ }
+}
+
+// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
+ if v.enabled {
+ logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
+ }
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+ if v.enabled {
+ logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...)
+ }
+}
+
+// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
+ if v.enabled {
+ logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...)
+ }
+}
+
+// InfoS is equivalent to the global InfoS function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...)
+ }
+}
+
+// InfoSDepth acts as InfoS but uses depth to determine which call frame to log.
+// InfoSDepth(0, "msg") is the same as InfoS("msg").
+func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
+ logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...)
+}
+
+// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...)
+ }
+}
+
+// Deprecated: Use ErrorS instead.
+func (v Verbose) Error(err error, msg string, args ...interface{}) {
+ if v.enabled {
+ logging.errorS(err, v.logger, logging.filter, 0, msg, args...)
+ }
+}
+
+// ErrorS is equivalent to the global Error function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
+ if v.enabled {
+ logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
+ }
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+ logging.print(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Infoln(args ...interface{}) {
+ logging.println(severity.InfoLog, logging.logger, logging.filter, args...)
+}
+
+// InfolnDepth acts as Infoln but uses depth to determine which call frame to log.
+// InfolnDepth(0, "msg") is the same as Infoln("msg").
+func InfolnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+ logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...)
+}
+
+// InfofDepth acts as Infof but uses depth to determine which call frame to log.
+// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...).
+func InfofDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// InfoS logs a structured message to the INFO log.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
+// output:
+// >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
+func InfoS(msg string, keysAndValues ...interface{}) {
+ logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+ logging.print(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Warningln(args ...interface{}) {
+ logging.println(severity.WarningLog, logging.logger, logging.filter, args...)
+}
+
+// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log.
+// WarninglnDepth(0, "msg") is the same as Warningln("msg").
+func WarninglnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+ logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...)
+}
+
+// WarningfDepth acts as Warningf but uses depth to determine which call frame to log.
+// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...).
+func WarningfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+ logging.print(severity.ErrorLog, logging.logger, logging.filter, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Errorln(args ...interface{}) {
+ logging.println(severity.ErrorLog, logging.logger, logging.filter, args...)
+}
+
+// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log.
+// ErrorlnDepth(0, "msg") is the same as Errorln("msg").
+func ErrorlnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+ logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...)
+}
+
+// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log.
+// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...).
+func ErrorfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// ErrorS logs a structured message to the ERROR, WARNING, and INFO logs.
+// The err argument is used as the "err" field of the log line.
+// The msg argument is used to add a constant description to the log line.
+// The key/value pairs are joined by "="; a newline is always appended.
+//
+// Basic examples:
+// >> klog.ErrorS(err, "Failed to update pod status")
+// output:
+// >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout"
+func ErrorS(err error, msg string, keysAndValues ...interface{}) {
+ logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...)
+}
+
+// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log.
+// ErrorSDepth(0, "msg") is the same as ErrorS("msg").
+func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
+ logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// prints stack trace(s), then calls OsExit(255).
+//
+// Stderr only receives a dump of the current goroutine's stack trace. Log files,
+// if there are any, receive a dump of the stack traces in all goroutines.
+//
+// Callers who want more control over handling of fatal events may instead use a
+// combination of different functions:
+// - some info or error logging function, optionally with a stack trace
+// value generated by github.com/go-logr/lib/dbg.Backtrace
+// - Flush to flush pending log data
+// - panic, os.Exit or returning to the caller with an error
+//
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+ logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+ logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls OsExit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is always appended.
+func Fatalln(args ...interface{}) {
+ logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log.
+// FatallnDepth(0, "msg") is the same as Fatalln("msg").
+func FatallnDepth(depth int, args ...interface{}) {
+ logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls OsExit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+ logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
+}
+
+// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log.
+// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...).
+func FatalfDepth(depth int, format string, args ...interface{}) {
+ logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.print(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+func Exitln(args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.println(severity.FatalLog, logging.logger, logging.filter, args...)
+}
+
+// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log.
+// ExitlnDepth(0, "msg") is the same as Exitln("msg").
+func ExitlnDepth(depth int, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...)
+}
+
+// ExitfDepth acts as Exitf but uses depth to determine which call frame to log.
+// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...).
+func ExitfDepth(depth int, format string, args ...interface{}) {
+ atomic.StoreUint32(&fatalNoStacks, 1)
+ logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...)
+}
+
+// LogFilter is a collection of functions that can filter all logging calls,
+// e.g. for sanitization of arguments and to prevent accidental leaking of secrets.
+type LogFilter interface {
+ Filter(args []interface{}) []interface{}
+ FilterF(format string, args []interface{}) (string, []interface{})
+ FilterS(msg string, keysAndValues []interface{}) (string, []interface{})
+}
+
+// SetLogFilter installs a filter that is used for all log calls.
+//
+// Modifying the filter is not thread-safe and should be done while no other
+// goroutines invoke log calls, usually during program initialization.
+func SetLogFilter(filter LogFilter) {
+ logging.filter = filter
+}
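+
+// Illustrative sketch of a LogFilter implementation: a hypothetical filter
+// that redacts one secret string from every argument list. Real filters would
+// apply whatever sanitization policy the program needs.
+//
+//	type redactFilter struct{ secret string }
+//
+//	func (f redactFilter) Filter(args []interface{}) []interface{} {
+//		for i, a := range args {
+//			if s, ok := a.(string); ok && s == f.secret {
+//				args[i] = "[REDACTED]"
+//			}
+//		}
+//		return args
+//	}
+//
+//	func (f redactFilter) FilterF(format string, args []interface{}) (string, []interface{}) {
+//		return format, f.Filter(args)
+//	}
+//
+//	func (f redactFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) {
+//		return msg, f.Filter(keysAndValues)
+//	}
+//
+//	func init() {
+//		klog.SetLogFilter(redactFilter{secret: "hunter2"})
+//	}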
diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go
new file mode 100644
index 0000000000..1025d644f3
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file.go
@@ -0,0 +1,130 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package klog
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+func createLogDirs() {
+ if logging.logDir != "" {
+ logDirs = append(logDirs, logging.logDir)
+ }
+ logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+ pid = os.Getpid()
+ program = filepath.Base(os.Args[0])
+ host = "unknownhost"
+ userName = "unknownuser"
+ userNameOnce sync.Once
+)
+
+func init() {
+ if h, err := os.Hostname(); err == nil {
+ host = shortHostname(h)
+ }
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+ if i := strings.Index(hostname, "."); i >= 0 {
+ return hostname[:i]
+ }
+ return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+ name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+ program,
+ host,
+ getUserName(),
+ tag,
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ t.Second(),
+ pid)
+ return name, program + "." + tag
+}
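+
+// For illustration, with hypothetical values (program "kpod", host "node1",
+// user "alice", pid 4242), logName("INFO", t) for t = 2023-05-05 10:15:02
+// would produce a name like
+//
+//	kpod.node1.alice.log.INFO.20230505-101502.4242
+//
+// and the symlink name "kpod.INFO".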
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
+ if logging.logFile != "" {
+ f, err := openOrCreate(logging.logFile, startup)
+ if err == nil {
+ return f, logging.logFile, nil
+ }
+ return nil, "", fmt.Errorf("log: unable to create log: %v", err)
+ }
+ onceLogDirs.Do(createLogDirs)
+ if len(logDirs) == 0 {
+ return nil, "", errors.New("log: no log dirs")
+ }
+ name, link := logName(tag, t)
+ var lastErr error
+ for _, dir := range logDirs {
+ fname := filepath.Join(dir, name)
+ f, err := openOrCreate(fname, startup)
+ if err == nil {
+ symlink := filepath.Join(dir, link)
+ os.Remove(symlink) // ignore err
+ os.Symlink(name, symlink) // ignore err
+ return f, fname, nil
+ }
+ lastErr = err
+ }
+ return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
+
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func openOrCreate(name string, startup bool) (*os.File, error) {
+ if startup {
+ f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ return f, err
+ }
+ f, err := os.Create(name)
+ return f, err
+}
diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go
new file mode 100644
index 0000000000..aa46726851
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file_others.go
@@ -0,0 +1,19 @@
+//go:build !windows
+// +build !windows
+
+package klog
+
+import (
+ "os/user"
+)
+
+func getUserName() string {
+ userNameOnce.Do(func() {
+ current, err := user.Current()
+ if err == nil {
+ userName = current.Username
+ }
+ })
+
+ return userName
+}
diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go
new file mode 100644
index 0000000000..2517f9c538
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klog_file_windows.go
@@ -0,0 +1,34 @@
+//go:build windows
+// +build windows
+
+package klog
+
+import (
+ "os"
+ "strings"
+)
+
+func getUserName() string {
+ userNameOnce.Do(func() {
+ // On Windows, the Go 'user' package requires netapi32.dll.
+ // This affects Windows Nano Server:
+ // https://github.com/golang/go/issues/21867
+ // Fallback to using environment variables.
+ u := os.Getenv("USERNAME")
+ if len(u) == 0 {
+ return
+ }
+ // Sanitize the USERNAME since it may contain filepath separators.
+ u = strings.Replace(u, `\`, "_", -1)
+
+ // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME'
+ d := os.Getenv("USERDOMAIN")
+ if len(d) != 0 {
+ userName = d + "_" + u
+ } else {
+ userName = u
+ }
+ })
+
+ return userName
+}
diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go
new file mode 100644
index 0000000000..15de00e21f
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/klogr.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package klog
+
+import (
+ "github.com/go-logr/logr"
+
+ "k8s.io/klog/v2/internal/serialize"
+)
+
+// NewKlogr returns a logger that is functionally identical to
+// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The
+// difference is that it uses a simpler implementation.
+func NewKlogr() Logger {
+ return New(&klogger{})
+}
+
+// klogger is a subset of klogr/klogr.go. It had to be copied to break an
+// import cycle (klogr wants to use klog, and klog wants to use klogr).
+type klogger struct {
+ level int
+ callDepth int
+ prefix string
+ values []interface{}
+}
+
+func (l *klogger) Init(info logr.RuntimeInfo) {
+ l.callDepth += info.CallDepth
+}
+
+func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
+ merged := serialize.MergeKVs(l.values, kvList)
+ if l.prefix != "" {
+ msg = l.prefix + ": " + msg
+ }
+ // Skip this function.
+ VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+}
+
+func (l *klogger) Enabled(level int) bool {
+ // Skip this function and logr.Logger.Info where Enabled is called.
+ return VDepth(l.callDepth+2, Level(level)).Enabled()
+}
+
+func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
+ merged := serialize.MergeKVs(l.values, kvList)
+ if l.prefix != "" {
+ msg = l.prefix + ": " + msg
+ }
+ ErrorSDepth(l.callDepth+1, err, msg, merged...)
+}
+
+// WithName returns a new logr.LogSink with the specified name appended. klogr
+// uses '/' characters to separate name elements. Callers should not pass '/'
+// in the provided name string, but this library does not actually enforce that.
+func (l klogger) WithName(name string) logr.LogSink {
+ if len(l.prefix) > 0 {
+ l.prefix = l.prefix + "/"
+ }
+ l.prefix += name
+ return &l
+}
+
+func (l klogger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.values = serialize.WithValues(l.values, kvList)
+ return &l
+}
+
+func (l klogger) WithCallDepth(depth int) logr.LogSink {
+ l.callDepth += depth
+ return &l
+}
+
+var _ logr.LogSink = &klogger{}
+var _ logr.CallDepthLogSink = &klogger{}
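+
+// Illustrative sketch: obtaining a logr.Logger backed by klog and emitting
+// structured messages (the names, key/value pairs, and error are arbitrary
+// examples).
+//
+//	logger := klog.NewKlogr().WithName("controller").WithValues("pod", "kubedns")
+//	logger.V(2).Info("status updated", "status", "ready")
+//	logger.Error(err, "status update failed")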
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 77f6374655..bf2bbac734 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -62,7 +62,7 @@ github.com/containers/common/pkg/flag
github.com/containers/common/pkg/report
github.com/containers/common/pkg/report/camelcase
github.com/containers/common/pkg/retry
-# github.com/containers/image/v5 v5.25.0
+# github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e
## explicit; go 1.18
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -220,7 +220,7 @@ github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v23.0.4+incompatible
+# github.com/docker/docker v23.0.5+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -254,12 +254,16 @@ github.com/docker/go-connections/tlsconfig
github.com/docker/go-units
# github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5
## explicit; go 1.9
+# github.com/go-chi/chi v4.1.2+incompatible
+## explicit
+github.com/go-chi/chi
+github.com/go-chi/chi/middleware
# github.com/go-jose/go-jose/v3 v3.0.0
## explicit; go 1.12
github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher
github.com/go-jose/go-jose/v3/json
-# github.com/go-logr/logr v1.2.3
+# github.com/go-logr/logr v1.2.4
## explicit; go 1.16
github.com/go-logr/logr
github.com/go-logr/logr/funcr
@@ -288,7 +292,7 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/loads v0.21.2
## explicit; go 1.13
github.com/go-openapi/loads
-# github.com/go-openapi/runtime v0.25.0
+# github.com/go-openapi/runtime v0.26.0
## explicit; go 1.18
github.com/go-openapi/runtime
github.com/go-openapi/runtime/client
@@ -299,7 +303,7 @@ github.com/go-openapi/runtime/middleware/header
github.com/go-openapi/runtime/middleware/untyped
github.com/go-openapi/runtime/security
github.com/go-openapi/runtime/yamlpc
-# github.com/go-openapi/spec v0.20.8
+# github.com/go-openapi/spec v0.20.9
## explicit; go 1.13
github.com/go-openapi/spec
# github.com/go-openapi/strfmt v0.21.7
@@ -318,7 +322,7 @@ github.com/go-playground/locales/currency
# github.com/go-playground/universal-translator v0.18.1
## explicit; go 1.18
github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.12.0
+# github.com/go-playground/validator/v10 v10.13.0
## explicit; go 1.18
github.com/go-playground/validator/v10
# github.com/gogo/protobuf v1.3.2
@@ -337,7 +341,7 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-containerregistry v0.13.0
+# github.com/google/go-containerregistry v0.14.0
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
# github.com/google/go-intervals v0.0.2
@@ -346,6 +350,8 @@ github.com/google/go-intervals/intervalset
# github.com/google/trillian v1.5.1
## explicit; go 1.17
github.com/google/trillian
+github.com/google/trillian/client
+github.com/google/trillian/client/backoff
github.com/google/trillian/types
github.com/google/trillian/types/internal/tls
# github.com/google/uuid v1.3.0
@@ -378,7 +384,7 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.16.4
+# github.com/klauspost/compress v1.16.5
## explicit; go 1.18
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -388,10 +394,10 @@ github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8
+# github.com/klauspost/pgzip v1.2.6
## explicit
github.com/klauspost/pgzip
-# github.com/leodido/go-urn v1.2.2
+# github.com/leodido/go-urn v1.2.3
## explicit; go 1.16
github.com/leodido/go-urn
# github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6
@@ -437,8 +443,8 @@ github.com/oklog/ulid
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
-## explicit; go 1.17
+# github.com/opencontainers/image-spec v1.1.0-rc3
+## explicit; go 1.18
github.com/opencontainers/image-spec/schema
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
@@ -483,11 +489,11 @@ github.com/rivo/uniseg
# github.com/segmentio/ksuid v1.0.4
## explicit; go 1.12
github.com/segmentio/ksuid
-# github.com/sigstore/fulcio v1.2.0
+# github.com/sigstore/fulcio v1.3.1
## explicit; go 1.20
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.1.0
+# github.com/sigstore/rekor v1.1.1
## explicit; go 1.19
github.com/sigstore/rekor/pkg/client
github.com/sigstore/rekor/pkg/generated/client
@@ -496,8 +502,9 @@ github.com/sigstore/rekor/pkg/generated/client/index
github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
+github.com/sigstore/rekor/pkg/log
github.com/sigstore/rekor/pkg/util
-# github.com/sigstore/sigstore v1.6.0
+# github.com/sigstore/sigstore v1.6.4
## explicit; go 1.18
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
@@ -525,7 +532,7 @@ github.com/stefanberger/go-pkcs11uri
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
-# github.com/sylabs/sif/v2 v2.11.1
+# github.com/sylabs/sif/v2 v2.11.3
## explicit; go 1.19
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
@@ -540,6 +547,12 @@ github.com/theupdateframework/go-tuf/encrypted
# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
## explicit
github.com/titanous/rocacheck
+# github.com/transparency-dev/merkle v0.0.1
+## explicit; go 1.16
+github.com/transparency-dev/merkle
+github.com/transparency-dev/merkle/compact
+github.com/transparency-dev/merkle/proof
+github.com/transparency-dev/merkle/rfc6962
# github.com/ulikunitz/xz v0.5.11
## explicit; go 1.12
github.com/ulikunitz/xz
@@ -551,7 +564,7 @@ github.com/ulikunitz/xz/lzma
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.3.0
+# github.com/vbauerster/mpb/v8 v8.4.0
## explicit; go 1.17
github.com/vbauerster/mpb/v8
github.com/vbauerster/mpb/v8/cwriter
@@ -588,8 +601,8 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/otel v1.13.0
-## explicit; go 1.18
+# go.opentelemetry.io/otel v1.15.0
+## explicit; go 1.19
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/baggage
@@ -601,9 +614,24 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.12.0
-# go.opentelemetry.io/otel/trace v1.13.0
-## explicit; go 1.18
+# go.opentelemetry.io/otel/trace v1.15.0
+## explicit; go 1.19
go.opentelemetry.io/otel/trace
+# go.uber.org/atomic v1.10.0
+## explicit; go 1.18
+go.uber.org/atomic
+# go.uber.org/multierr v1.11.0
+## explicit; go 1.19
+go.uber.org/multierr
+# go.uber.org/zap v1.24.0
+## explicit; go 1.19
+go.uber.org/zap
+go.uber.org/zap/buffer
+go.uber.org/zap/internal
+go.uber.org/zap/internal/bufferpool
+go.uber.org/zap/internal/color
+go.uber.org/zap/internal/exit
+go.uber.org/zap/zapcore
# golang.org/x/crypto v0.8.0
## explicit; go 1.17
golang.org/x/crypto/cast5
@@ -627,7 +655,7 @@ golang.org/x/crypto/sha3
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
-# golang.org/x/mod v0.9.0
+# golang.org/x/mod v0.10.0
## explicit; go 1.17
golang.org/x/mod/semver
golang.org/x/mod/sumdb/note
@@ -642,11 +670,11 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.6.0
+# golang.org/x/oauth2 v0.7.0
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.1.0
+# golang.org/x/sync v0.2.0
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
@@ -698,7 +726,7 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
+# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.54.0
@@ -802,3 +830,11 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
+# k8s.io/klog/v2 v2.90.1
+## explicit; go 1.13
+k8s.io/klog/v2
+k8s.io/klog/v2/internal/buffer
+k8s.io/klog/v2/internal/clock
+k8s.io/klog/v2/internal/dbg
+k8s.io/klog/v2/internal/serialize
+k8s.io/klog/v2/internal/severity